From a74240482c0395fe0a5029d2dc3342fd72ba64d7 Mon Sep 17 00:00:00 2001 From: Azure SDK Bot <53356347+azure-sdk@users.noreply.github.com> Date: Tue, 2 Apr 2024 23:29:23 -0400 Subject: [PATCH] [Release] sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/0.3.0 generation from spec commit: 92de53a5f1e0e03c94b40475d2135d97148ed014 (#22690) Co-authored-by: ReleaseHelper --- .../armhdinsightcontainers/CHANGELOG.md | 81 ++ .../armhdinsightcontainers/README.md | 2 +- .../armhdinsightcontainers/autorest.md | 8 +- .../availableclusterpoolversions_client.go | 4 +- ...clusterpoolversions_client_example_test.go | 2 +- .../availableclusterversions_client.go | 4 +- ...ableclusterversions_client_example_test.go | 2 +- .../armhdinsightcontainers/client_factory.go | 66 +- .../clusteravailableupgrades_client.go | 114 ++ ...eravailableupgrades_client_example_test.go | 72 + .../clusterjobs_client.go | 13 +- .../clusterjobs_client_example_test.go | 12 +- .../clusterpoolavailableupgrades_client.go | 109 ++ ...olavailableupgrades_client_example_test.go | 62 + .../clusterpools_client.go | 111 +- .../clusterpools_client_example_test.go | 296 +++- .../armhdinsightcontainers/clusters_client.go | 129 +- .../clusters_client_example_test.go | 1285 ++++++++++++++--- .../armhdinsightcontainers/constants.go | 258 +++- .../fake/clusteravailableupgrades_server.go | 116 ++ .../fake/clusterjobs_server.go | 14 +- .../clusterpoolavailableupgrades_server.go | 112 ++ .../fake/clusterpools_server.go | 56 + .../fake/clusters_server.go | 60 + .../armhdinsightcontainers/fake/internal.go | 8 + .../fake/server_factory.go | 14 + .../fake/time_rfc3339.go | 42 +- .../armhdinsightcontainers/go.mod | 10 +- .../armhdinsightcontainers/go.sum | 22 +- .../armhdinsightcontainers/interfaces.go | 36 + .../locations_client.go | 4 +- .../locations_client_example_test.go | 2 +- .../armhdinsightcontainers/models.go | 559 ++++++- .../armhdinsightcontainers/models_serde.go | 1101 +++++++++++++- .../operations_client.go 
| 4 +- .../operations_client_example_test.go | 2 +- .../armhdinsightcontainers/options.go | 26 + .../polymorphic_helpers.go | 94 +- .../{response_types.go => responses.go} | 24 + .../armhdinsightcontainers/time_rfc3339.go | 42 +- 40 files changed, 4556 insertions(+), 422 deletions(-) create mode 100644 sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusteravailableupgrades_client.go create mode 100644 sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusteravailableupgrades_client_example_test.go create mode 100644 sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusterpoolavailableupgrades_client.go create mode 100644 sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusterpoolavailableupgrades_client_example_test.go create mode 100644 sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/fake/clusteravailableupgrades_server.go create mode 100644 sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/fake/clusterpoolavailableupgrades_server.go rename sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/{response_types.go => responses.go} (84%) diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/CHANGELOG.md b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/CHANGELOG.md index d200f3aecd5d..1057db204f70 100644 --- a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/CHANGELOG.md +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/CHANGELOG.md @@ -1,5 +1,86 @@ # Release History +## 0.3.0 (2024-04-26) +### Breaking Changes + +- Type of `ClusterProfile.KafkaProfile` has been changed from `map[string]any` to `*KafkaProfile` +- Field `ID`, `Location`, `Name`, `SystemData`, `Type` of struct `ClusterPatch` has been removed + +### Features Added + +- New value `ActionLASTSTATEUPDATE`, `ActionRELAUNCH` added to enum type `Action` +- New enum type `ClusterAvailableUpgradeType` with values 
`ClusterAvailableUpgradeTypeAKSPatchUpgrade`, `ClusterAvailableUpgradeTypeHotfixUpgrade` +- New enum type `ClusterPoolAvailableUpgradeType` with values `ClusterPoolAvailableUpgradeTypeAKSPatchUpgrade`, `ClusterPoolAvailableUpgradeTypeNodeOsUpgrade` +- New enum type `ClusterPoolUpgradeType` with values `ClusterPoolUpgradeTypeAKSPatchUpgrade`, `ClusterPoolUpgradeTypeNodeOsUpgrade` +- New enum type `ClusterUpgradeType` with values `ClusterUpgradeTypeAKSPatchUpgrade`, `ClusterUpgradeTypeHotfixUpgrade` +- New enum type `CurrentClusterAksVersionStatus` with values `CurrentClusterAksVersionStatusDeprecated`, `CurrentClusterAksVersionStatusSupported` +- New enum type `CurrentClusterPoolAksVersionStatus` with values `CurrentClusterPoolAksVersionStatusDeprecated`, `CurrentClusterPoolAksVersionStatusSupported` +- New enum type `DataDiskType` with values `DataDiskTypePremiumSSDLRS`, `DataDiskTypePremiumSSDV2LRS`, `DataDiskTypePremiumSSDZRS`, `DataDiskTypeStandardHDDLRS`, `DataDiskTypeStandardSSDLRS`, `DataDiskTypeStandardSSDZRS` +- New enum type `DbConnectionAuthenticationMode` with values `DbConnectionAuthenticationModeIdentityAuth`, `DbConnectionAuthenticationModeSQLAuth` +- New enum type `DeploymentMode` with values `DeploymentModeApplication`, `DeploymentModeSession` +- New enum type `MetastoreDbConnectionAuthenticationMode` with values `MetastoreDbConnectionAuthenticationModeIdentityAuth`, `MetastoreDbConnectionAuthenticationModeSQLAuth` +- New enum type `OutboundType` with values `OutboundTypeLoadBalancer`, `OutboundTypeUserDefinedRouting` +- New enum type `RangerUsersyncMode` with values `RangerUsersyncModeAutomatic`, `RangerUsersyncModeStatic` +- New enum type `Severity` with values `SeverityCritical`, `SeverityHigh`, `SeverityLow`, `SeverityMedium` +- New enum type `UpgradeMode` with values `UpgradeModeLASTSTATEUPDATE`, `UpgradeModeSTATELESSUPDATE`, `UpgradeModeUPDATE` +- New function `*ClientFactory.NewClusterAvailableUpgradesClient() *ClusterAvailableUpgradesClient` 
+- New function `*ClientFactory.NewClusterPoolAvailableUpgradesClient() *ClusterPoolAvailableUpgradesClient` +- New function `*ClusterAKSPatchVersionUpgradeProperties.GetClusterUpgradeProperties() *ClusterUpgradeProperties` +- New function `*ClusterAvailableUpgradeAksPatchUpgradeProperties.GetClusterAvailableUpgradeProperties() *ClusterAvailableUpgradeProperties` +- New function `*ClusterAvailableUpgradeHotfixUpgradeProperties.GetClusterAvailableUpgradeProperties() *ClusterAvailableUpgradeProperties` +- New function `*ClusterAvailableUpgradeProperties.GetClusterAvailableUpgradeProperties() *ClusterAvailableUpgradeProperties` +- New function `NewClusterAvailableUpgradesClient(string, azcore.TokenCredential, *arm.ClientOptions) (*ClusterAvailableUpgradesClient, error)` +- New function `*ClusterAvailableUpgradesClient.NewListPager(string, string, string, *ClusterAvailableUpgradesClientListOptions) *runtime.Pager[ClusterAvailableUpgradesClientListResponse]` +- New function `*ClusterHotfixUpgradeProperties.GetClusterUpgradeProperties() *ClusterUpgradeProperties` +- New function `*ClusterPoolAKSPatchVersionUpgradeProperties.GetClusterPoolUpgradeProperties() *ClusterPoolUpgradeProperties` +- New function `*ClusterPoolAvailableUpgradeAksPatchUpgradeProperties.GetClusterPoolAvailableUpgradeProperties() *ClusterPoolAvailableUpgradeProperties` +- New function `*ClusterPoolAvailableUpgradeNodeOsUpgradeProperties.GetClusterPoolAvailableUpgradeProperties() *ClusterPoolAvailableUpgradeProperties` +- New function `*ClusterPoolAvailableUpgradeProperties.GetClusterPoolAvailableUpgradeProperties() *ClusterPoolAvailableUpgradeProperties` +- New function `NewClusterPoolAvailableUpgradesClient(string, azcore.TokenCredential, *arm.ClientOptions) (*ClusterPoolAvailableUpgradesClient, error)` +- New function `*ClusterPoolAvailableUpgradesClient.NewListPager(string, string, *ClusterPoolAvailableUpgradesClientListOptions) *runtime.Pager[ClusterPoolAvailableUpgradesClientListResponse]` +- New 
function `*ClusterPoolNodeOsImageUpdateProperties.GetClusterPoolUpgradeProperties() *ClusterPoolUpgradeProperties` +- New function `*ClusterPoolUpgradeProperties.GetClusterPoolUpgradeProperties() *ClusterPoolUpgradeProperties` +- New function `*ClusterPoolsClient.BeginUpgrade(context.Context, string, string, ClusterPoolUpgrade, *ClusterPoolsClientBeginUpgradeOptions) (*runtime.Poller[ClusterPoolsClientUpgradeResponse], error)` +- New function `*ClusterUpgradeProperties.GetClusterUpgradeProperties() *ClusterUpgradeProperties` +- New function `*ClustersClient.BeginUpgrade(context.Context, string, string, string, ClusterUpgrade, *ClustersClientBeginUpgradeOptions) (*runtime.Poller[ClustersClientUpgradeResponse], error)` +- New struct `ClusterAKSPatchVersionUpgradeProperties` +- New struct `ClusterAccessProfile` +- New struct `ClusterAvailableUpgrade` +- New struct `ClusterAvailableUpgradeAksPatchUpgradeProperties` +- New struct `ClusterAvailableUpgradeHotfixUpgradeProperties` +- New struct `ClusterAvailableUpgradeList` +- New struct `ClusterHotfixUpgradeProperties` +- New struct `ClusterPoolAKSPatchVersionUpgradeProperties` +- New struct `ClusterPoolAvailableUpgrade` +- New struct `ClusterPoolAvailableUpgradeAksPatchUpgradeProperties` +- New struct `ClusterPoolAvailableUpgradeList` +- New struct `ClusterPoolAvailableUpgradeNodeOsUpgradeProperties` +- New struct `ClusterPoolNodeOsImageUpdateProperties` +- New struct `ClusterPoolUpgrade` +- New struct `ClusterRangerPluginProfile` +- New struct `ClusterUpgrade` +- New struct `DiskStorageProfile` +- New struct `FlinkJobProfile` +- New struct `KafkaConnectivityEndpoints` +- New struct `KafkaProfile` +- New struct `RangerAdminSpec` +- New struct `RangerAdminSpecDatabase` +- New struct `RangerAuditSpec` +- New struct `RangerProfile` +- New struct `RangerUsersyncSpec` +- New field `Filter` in struct `ClusterJobsClientListOptions` +- New field `APIServerAuthorizedIPRanges`, `EnablePrivateAPIServer`, `OutboundType` in struct 
`ClusterPoolResourcePropertiesNetworkProfile` +- New field `ClusterAccessProfile`, `RangerPluginProfile`, `RangerProfile` in struct `ClusterProfile` +- New field `PrivateFqdn` in struct `ConnectivityProfileWeb` +- New field `MetastoreDbConnectionAuthenticationMode` in struct `FlinkHiveCatalogOption` +- New field `RunID` in struct `FlinkJobProperties` +- New field `DeploymentMode`, `JobSpec` in struct `FlinkProfile` +- New field `MetastoreDbConnectionAuthenticationMode` in struct `HiveCatalogOption` +- New field `PrivateSSHEndpoint` in struct `SSHConnectivityEndpoint` +- New field `DbConnectionAuthenticationMode` in struct `SparkMetastoreSpec` +- New field `RangerPluginProfile`, `RangerProfile` in struct `UpdatableClusterProfile` + + ## 0.2.0 (2023-11-24) ### Features Added diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/README.md b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/README.md index 9bec5b7e480d..493da684d342 100644 --- a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/README.md +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/README.md @@ -57,7 +57,7 @@ clientFactory, err := armhdinsightcontainers.NewClientFactory(, A client groups a set of related APIs, providing access to its functionality. Create one or more clients to access the APIs you require using client factory. 
```go -client := clientFactory.NewClustersClient() +client := clientFactory.NewAvailableClusterPoolVersionsClient() ``` ## Fakes diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/autorest.md b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/autorest.md index e1c5411a1023..404c404adc11 100644 --- a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/autorest.md +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/autorest.md @@ -5,9 +5,9 @@ ``` yaml azure-arm: true require: -- https://github.com/Azure/azure-rest-api-specs/blob/7f70e351393addbc31d790a908c994c7c8644d9c/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/readme.md -- https://github.com/Azure/azure-rest-api-specs/blob/7f70e351393addbc31d790a908c994c7c8644d9c/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/readme.go.md +- https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/readme.md +- https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/readme.go.md license-header: MICROSOFT_MIT_NO_VERSION -module-version: 0.2.0 -tag: package-2023-06-preview +module-version: 0.3.0 +tag: package-preview-2023-11 ``` \ No newline at end of file diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/availableclusterpoolversions_client.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/availableclusterpoolversions_client.go index a62409198b8e..0a3641184f29 100644 --- a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/availableclusterpoolversions_client.go +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/availableclusterpoolversions_client.go @@ -45,7 +45,7 @@ func 
NewAvailableClusterPoolVersionsClient(subscriptionID string, credential azc // NewListByLocationPager - Returns a list of available cluster pool versions. // -// Generated from API version 2023-06-01-preview +// Generated from API version 2023-11-01-preview // - location - The name of the Azure region. // - options - AvailableClusterPoolVersionsClientListByLocationOptions contains the optional parameters for the AvailableClusterPoolVersionsClient.NewListByLocationPager // method. @@ -88,7 +88,7 @@ func (client *AvailableClusterPoolVersionsClient) listByLocationCreateRequest(ct return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-06-01-preview") + reqQP.Set("api-version", "2023-11-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/availableclusterpoolversions_client_example_test.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/availableclusterpoolversions_client_example_test.go index cad878c5627f..708042e9ec8b 100644 --- a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/availableclusterpoolversions_client_example_test.go +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/availableclusterpoolversions_client_example_test.go @@ -17,7 +17,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers" ) -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/7f70e351393addbc31d790a908c994c7c8644d9c/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-06-01-preview/examples/ListAvailableClusterPoolVersions.json +// Generated from example definition: 
https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-11-01-preview/examples/ListAvailableClusterPoolVersions.json func ExampleAvailableClusterPoolVersionsClient_NewListByLocationPager() { cred, err := azidentity.NewDefaultAzureCredential(nil) if err != nil { diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/availableclusterversions_client.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/availableclusterversions_client.go index 87f49c947272..43cdcc237ba7 100644 --- a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/availableclusterversions_client.go +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/availableclusterversions_client.go @@ -45,7 +45,7 @@ func NewAvailableClusterVersionsClient(subscriptionID string, credential azcore. // NewListByLocationPager - Returns a list of available cluster versions. // -// Generated from API version 2023-06-01-preview +// Generated from API version 2023-11-01-preview // - location - The name of the Azure region. // - options - AvailableClusterVersionsClientListByLocationOptions contains the optional parameters for the AvailableClusterVersionsClient.NewListByLocationPager // method. 
@@ -88,7 +88,7 @@ func (client *AvailableClusterVersionsClient) listByLocationCreateRequest(ctx co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-06-01-preview") + reqQP.Set("api-version", "2023-11-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/availableclusterversions_client_example_test.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/availableclusterversions_client_example_test.go index b81176ecbe77..d16fcb16e3b6 100644 --- a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/availableclusterversions_client_example_test.go +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/availableclusterversions_client_example_test.go @@ -17,7 +17,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers" ) -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/7f70e351393addbc31d790a908c994c7c8644d9c/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-06-01-preview/examples/ListAvailableClusterVersions.json +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-11-01-preview/examples/ListAvailableClusterVersions.json func ExampleAvailableClusterVersionsClient_NewListByLocationPager() { cred, err := azidentity.NewDefaultAzureCredential(nil) if err != nil { diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/client_factory.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/client_factory.go index 62dcc00d54b1..361a5c4bd441 100644 --- 
a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/client_factory.go +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/client_factory.go @@ -17,8 +17,7 @@ import ( // Don't use this type directly, use NewClientFactory instead. type ClientFactory struct { subscriptionID string - credential azcore.TokenCredential - options *arm.ClientOptions + internal *arm.Client } // NewClientFactory creates a new instance of ClientFactory with the specified values. @@ -27,54 +26,83 @@ type ClientFactory struct { // - credential - used to authorize requests. Usually a credential from azidentity. // - options - pass nil to accept the default values. func NewClientFactory(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ClientFactory, error) { - _, err := arm.NewClient(moduleName, moduleVersion, credential, options) + internal, err := arm.NewClient(moduleName, moduleVersion, credential, options) if err != nil { return nil, err } return &ClientFactory{ - subscriptionID: subscriptionID, credential: credential, - options: options.Clone(), + subscriptionID: subscriptionID, + internal: internal, }, nil } // NewAvailableClusterPoolVersionsClient creates a new instance of AvailableClusterPoolVersionsClient. func (c *ClientFactory) NewAvailableClusterPoolVersionsClient() *AvailableClusterPoolVersionsClient { - subClient, _ := NewAvailableClusterPoolVersionsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &AvailableClusterPoolVersionsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewAvailableClusterVersionsClient creates a new instance of AvailableClusterVersionsClient. 
func (c *ClientFactory) NewAvailableClusterVersionsClient() *AvailableClusterVersionsClient { - subClient, _ := NewAvailableClusterVersionsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &AvailableClusterVersionsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } +} + +// NewClusterAvailableUpgradesClient creates a new instance of ClusterAvailableUpgradesClient. +func (c *ClientFactory) NewClusterAvailableUpgradesClient() *ClusterAvailableUpgradesClient { + return &ClusterAvailableUpgradesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewClusterJobsClient creates a new instance of ClusterJobsClient. func (c *ClientFactory) NewClusterJobsClient() *ClusterJobsClient { - subClient, _ := NewClusterJobsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &ClusterJobsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } +} + +// NewClusterPoolAvailableUpgradesClient creates a new instance of ClusterPoolAvailableUpgradesClient. +func (c *ClientFactory) NewClusterPoolAvailableUpgradesClient() *ClusterPoolAvailableUpgradesClient { + return &ClusterPoolAvailableUpgradesClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewClusterPoolsClient creates a new instance of ClusterPoolsClient. func (c *ClientFactory) NewClusterPoolsClient() *ClusterPoolsClient { - subClient, _ := NewClusterPoolsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &ClusterPoolsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewClustersClient creates a new instance of ClustersClient. func (c *ClientFactory) NewClustersClient() *ClustersClient { - subClient, _ := NewClustersClient(c.subscriptionID, c.credential, c.options) - return subClient + return &ClustersClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewLocationsClient creates a new instance of LocationsClient. 
func (c *ClientFactory) NewLocationsClient() *LocationsClient { - subClient, _ := NewLocationsClient(c.subscriptionID, c.credential, c.options) - return subClient + return &LocationsClient{ + subscriptionID: c.subscriptionID, + internal: c.internal, + } } // NewOperationsClient creates a new instance of OperationsClient. func (c *ClientFactory) NewOperationsClient() *OperationsClient { - subClient, _ := NewOperationsClient(c.credential, c.options) - return subClient + return &OperationsClient{ + internal: c.internal, + } } diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusteravailableupgrades_client.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusteravailableupgrades_client.go new file mode 100644 index 000000000000..1f36f8f6774d --- /dev/null +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusteravailableupgrades_client.go @@ -0,0 +1,114 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armhdinsightcontainers + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// ClusterAvailableUpgradesClient contains the methods for the ClusterAvailableUpgrades group. +// Don't use this type directly, use NewClusterAvailableUpgradesClient() instead. 
+type ClusterAvailableUpgradesClient struct { + internal *arm.Client + subscriptionID string +} + +// NewClusterAvailableUpgradesClient creates a new instance of ClusterAvailableUpgradesClient with the specified values. +// - subscriptionID - The ID of the target subscription. The value must be an UUID. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewClusterAvailableUpgradesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ClusterAvailableUpgradesClient, error) { + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &ClusterAvailableUpgradesClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// NewListPager - List a cluster available upgrade. +// +// Generated from API version 2023-11-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - clusterPoolName - The name of the cluster pool. +// - clusterName - The name of the HDInsight cluster. +// - options - ClusterAvailableUpgradesClientListOptions contains the optional parameters for the ClusterAvailableUpgradesClient.NewListPager +// method. 
+func (client *ClusterAvailableUpgradesClient) NewListPager(resourceGroupName string, clusterPoolName string, clusterName string, options *ClusterAvailableUpgradesClientListOptions) *runtime.Pager[ClusterAvailableUpgradesClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[ClusterAvailableUpgradesClientListResponse]{ + More: func(page ClusterAvailableUpgradesClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *ClusterAvailableUpgradesClientListResponse) (ClusterAvailableUpgradesClientListResponse, error) { + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ClusterAvailableUpgradesClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink + } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, clusterPoolName, clusterName, options) + }, nil) + if err != nil { + return ClusterAvailableUpgradesClientListResponse{}, err + } + return client.listHandleResponse(resp) + }, + Tracer: client.internal.Tracer(), + }) +} + +// listCreateRequest creates the List request. 
+func (client *ClusterAvailableUpgradesClient) listCreateRequest(ctx context.Context, resourceGroupName string, clusterPoolName string, clusterName string, options *ClusterAvailableUpgradesClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusterpools/{clusterPoolName}/clusters/{clusterName}/availableUpgrades" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if clusterPoolName == "" { + return nil, errors.New("parameter clusterPoolName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{clusterPoolName}", url.PathEscape(clusterPoolName)) + if clusterName == "" { + return nil, errors.New("parameter clusterName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{clusterName}", url.PathEscape(clusterName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-11-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. 
+func (client *ClusterAvailableUpgradesClient) listHandleResponse(resp *http.Response) (ClusterAvailableUpgradesClientListResponse, error) { + result := ClusterAvailableUpgradesClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ClusterAvailableUpgradeList); err != nil { + return ClusterAvailableUpgradesClientListResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusteravailableupgrades_client_example_test.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusteravailableupgrades_client_example_test.go new file mode 100644 index 000000000000..8a4f56af14f6 --- /dev/null +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusteravailableupgrades_client_example_test.go @@ -0,0 +1,72 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
+ +package armhdinsightcontainers_test + +import ( + "context" + "log" + + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers" +) + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-11-01-preview/examples/ListClusterAvailableUpgrades.json +func ExampleClusterAvailableUpgradesClient_NewListPager() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armhdinsightcontainers.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + pager := clientFactory.NewClusterAvailableUpgradesClient().NewListPager("hiloResourcegroup", "clusterpool1", "cluster1", nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // page.ClusterAvailableUpgradeList = armhdinsightcontainers.ClusterAvailableUpgradeList{ + // Value: []*armhdinsightcontainers.ClusterAvailableUpgrade{ + // { + // ID: to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.HDInsight/clusterPools/clusterpool1/clusters/cluster1/availableUpgrades/AKSPatchUpgrade"), + // Properties: &armhdinsightcontainers.ClusterAvailableUpgradeAksPatchUpgradeProperties{ + // UpgradeType: to.Ptr(armhdinsightcontainers.ClusterAvailableUpgradeTypeAKSPatchUpgrade), + // CurrentVersion: to.Ptr("1.26.3"), + // CurrentVersionStatus: to.Ptr(armhdinsightcontainers.CurrentClusterAksVersionStatusSupported), + // LatestVersion: to.Ptr("1.26.6"), + // }, + // }, + // { + // ID: to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.HDInsight/clusterPools/clusterpool1/clusters/cluster1/availableUpgrades/hotfix1"), + // Properties: &armhdinsightcontainers.ClusterAvailableUpgradeHotfixUpgradeProperties{ + // UpgradeType: to.Ptr(armhdinsightcontainers.ClusterAvailableUpgradeTypeHotfixUpgrade), + // Description: to.Ptr("Hotfix for historyserver on version 1.16.0-1.0.6.2"), + // ComponentName: to.Ptr("historyserver"), + // CreatedTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2023-03-29T14:13:12.000Z"); return t}()), + // ExtendedProperties: to.Ptr(""), + // Severity: to.Ptr(armhdinsightcontainers.SeverityLow), + // SourceBuildNumber: to.Ptr("2"), + // SourceClusterVersion: to.Ptr("1.0.6"), + // SourceOssVersion: to.Ptr("1.16.0"), + // TargetBuildNumber: to.Ptr("3"), + // TargetClusterVersion: to.Ptr("1.0.6"), + // TargetOssVersion: to.Ptr("1.16.0"), + // }, + // }}, + // } + } +} diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusterjobs_client.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusterjobs_client.go index 6880899f604b..1630aba79537 100644 --- 
a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusterjobs_client.go +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusterjobs_client.go @@ -45,7 +45,7 @@ func NewClusterJobsClient(subscriptionID string, credential azcore.TokenCredenti // NewListPager - Get jobs of HDInsight on AKS cluster. // -// Generated from API version 2023-06-01-preview +// Generated from API version 2023-11-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - clusterPoolName - The name of the cluster pool. // - clusterName - The name of the HDInsight cluster. @@ -97,7 +97,10 @@ func (client *ClusterJobsClient) listCreateRequest(ctx context.Context, resource return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-06-01-preview") + if options != nil && options.Filter != nil { + reqQP.Set("$filter", *options.Filter) + } + reqQP.Set("api-version", "2023-11-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -115,7 +118,7 @@ func (client *ClusterJobsClient) listHandleResponse(resp *http.Response) (Cluste // BeginRunJob - Operations on jobs of HDInsight on AKS cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-06-01-preview +// Generated from API version 2023-11-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - clusterPoolName - The name of the cluster pool. // - clusterName - The name of the HDInsight cluster. @@ -142,7 +145,7 @@ func (client *ClusterJobsClient) BeginRunJob(ctx context.Context, resourceGroupN // RunJob - Operations on jobs of HDInsight on AKS cluster. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-06-01-preview +// Generated from API version 2023-11-01-preview func (client *ClusterJobsClient) runJob(ctx context.Context, resourceGroupName string, clusterPoolName string, clusterName string, clusterJob ClusterJob, options *ClusterJobsClientBeginRunJobOptions) (*http.Response, error) { var err error const operationName = "ClusterJobsClient.BeginRunJob" @@ -188,7 +191,7 @@ func (client *ClusterJobsClient) runJobCreateRequest(ctx context.Context, resour return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-06-01-preview") + reqQP.Set("api-version", "2023-11-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, clusterJob); err != nil { diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusterjobs_client_example_test.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusterjobs_client_example_test.go index 382ea1c53fe0..f1e10106370a 100644 --- a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusterjobs_client_example_test.go +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusterjobs_client_example_test.go @@ -18,7 +18,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers" ) -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/7f70e351393addbc31d790a908c994c7c8644d9c/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-06-01-preview/examples/RunClusterJob.json +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-11-01-preview/examples/RunClusterJob.json func ExampleClusterJobsClient_BeginRunJob() { cred, err := 
azidentity.NewDefaultAzureCredential(nil) if err != nil { @@ -65,11 +65,12 @@ func ExampleClusterJobsClient_BeginRunJob() { // JarName: to.Ptr("flink-sleep-job-0.0.1-SNAPSHOT.jar"), // JobJarDirectory: to.Ptr("abfs://flinkjob@hilosa.dfs.core.windows.net/jars"), // JobName: to.Ptr("flink-job-name"), + // RunID: to.Ptr("job-15a-4322-b32c-ea541845e911"), // }, // } } -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/7f70e351393addbc31d790a908c994c7c8644d9c/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-06-01-preview/examples/ListClusterJobs.json +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-11-01-preview/examples/ListClusterJobs.json func ExampleClusterJobsClient_NewListPager() { cred, err := azidentity.NewDefaultAzureCredential(nil) if err != nil { @@ -80,7 +81,7 @@ func ExampleClusterJobsClient_NewListPager() { if err != nil { log.Fatalf("failed to create client: %v", err) } - pager := clientFactory.NewClusterJobsClient().NewListPager("hiloResourcegroup", "clusterPool1", "cluster1", nil) + pager := clientFactory.NewClusterJobsClient().NewListPager("hiloResourcegroup", "clusterPool1", "cluster1", &armhdinsightcontainers.ClusterJobsClientListOptions{Filter: nil}) for pager.More() { page, err := pager.NextPage(ctx) if err != nil { @@ -94,7 +95,7 @@ func ExampleClusterJobsClient_NewListPager() { // page.ClusterJobList = armhdinsightcontainers.ClusterJobList{ // Value: []*armhdinsightcontainers.ClusterJob{ // { - // ID: to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.HDInsight/clusterPools/clusterpool1/clusters/cluster1/jobs/flink-job-1"), + // ID: 
to.Ptr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/hiloResourcegroup/providers/Microsoft.HDInsight/clusterPools/clusterpool1/clusters/cluster1/jobs/flink-job-1"), // Properties: &armhdinsightcontainers.FlinkJobProperties{ // JobType: to.Ptr(armhdinsightcontainers.JobTypeFlinkJob), // ActionResult: to.Ptr("SUCCESS"), @@ -108,7 +109,8 @@ func ExampleClusterJobsClient_NewListPager() { // JobJarDirectory: to.Ptr("jobJarDirectory1"), // JobName: to.Ptr("flink-job-1"), // JobOutput: to.Ptr("job-output"), - // Status: to.Ptr("STOP-FAILED"), + // RunID: to.Ptr("job-15a-4322-b32c-ea541845e911"), + // Status: to.Ptr("RUNNING"), // }, // }}, // } diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusterpoolavailableupgrades_client.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusterpoolavailableupgrades_client.go new file mode 100644 index 000000000000..1b5ef391f6b1 --- /dev/null +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusterpoolavailableupgrades_client.go @@ -0,0 +1,109 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armhdinsightcontainers + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// ClusterPoolAvailableUpgradesClient contains the methods for the ClusterPoolAvailableUpgrades group. +// Don't use this type directly, use NewClusterPoolAvailableUpgradesClient() instead. 
+type ClusterPoolAvailableUpgradesClient struct { + internal *arm.Client + subscriptionID string +} + +// NewClusterPoolAvailableUpgradesClient creates a new instance of ClusterPoolAvailableUpgradesClient with the specified values. +// - subscriptionID - The ID of the target subscription. The value must be an UUID. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewClusterPoolAvailableUpgradesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ClusterPoolAvailableUpgradesClient, error) { + cl, err := arm.NewClient(moduleName, moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &ClusterPoolAvailableUpgradesClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// NewListPager - List a cluster pool available upgrade. +// +// Generated from API version 2023-11-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - clusterPoolName - The name of the cluster pool. +// - options - ClusterPoolAvailableUpgradesClientListOptions contains the optional parameters for the ClusterPoolAvailableUpgradesClient.NewListPager +// method. 
+func (client *ClusterPoolAvailableUpgradesClient) NewListPager(resourceGroupName string, clusterPoolName string, options *ClusterPoolAvailableUpgradesClientListOptions) *runtime.Pager[ClusterPoolAvailableUpgradesClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[ClusterPoolAvailableUpgradesClientListResponse]{ + More: func(page ClusterPoolAvailableUpgradesClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *ClusterPoolAvailableUpgradesClientListResponse) (ClusterPoolAvailableUpgradesClientListResponse, error) { + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, "ClusterPoolAvailableUpgradesClient.NewListPager") + nextLink := "" + if page != nil { + nextLink = *page.NextLink + } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.listCreateRequest(ctx, resourceGroupName, clusterPoolName, options) + }, nil) + if err != nil { + return ClusterPoolAvailableUpgradesClientListResponse{}, err + } + return client.listHandleResponse(resp) + }, + Tracer: client.internal.Tracer(), + }) +} + +// listCreateRequest creates the List request. 
+func (client *ClusterPoolAvailableUpgradesClient) listCreateRequest(ctx context.Context, resourceGroupName string, clusterPoolName string, options *ClusterPoolAvailableUpgradesClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusterpools/{clusterPoolName}/availableUpgrades" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if clusterPoolName == "" { + return nil, errors.New("parameter clusterPoolName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{clusterPoolName}", url.PathEscape(clusterPoolName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-11-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. 
+func (client *ClusterPoolAvailableUpgradesClient) listHandleResponse(resp *http.Response) (ClusterPoolAvailableUpgradesClientListResponse, error) { + result := ClusterPoolAvailableUpgradesClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ClusterPoolAvailableUpgradeList); err != nil { + return ClusterPoolAvailableUpgradesClientListResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusterpoolavailableupgrades_client_example_test.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusterpoolavailableupgrades_client_example_test.go new file mode 100644 index 000000000000..fc184b6057e9 --- /dev/null +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusterpoolavailableupgrades_client_example_test.go @@ -0,0 +1,62 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
+ +package armhdinsightcontainers_test + +import ( + "context" + "log" + + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers" +) + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-11-01-preview/examples/ListClusterPoolAvailableUpgrades.json +func ExampleClusterPoolAvailableUpgradesClient_NewListPager() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armhdinsightcontainers.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + pager := clientFactory.NewClusterPoolAvailableUpgradesClient().NewListPager("hiloResourcegroup", "clusterpool1", nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // page.ClusterPoolAvailableUpgradeList = armhdinsightcontainers.ClusterPoolAvailableUpgradeList{ + // Value: []*armhdinsightcontainers.ClusterPoolAvailableUpgrade{ + // { + // ID: to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.HDInsight/clusterPools/clusterpool1/availableUpgrades/AKSPatchUpgrade"), + // Properties: &armhdinsightcontainers.ClusterPoolAvailableUpgradeAksPatchUpgradeProperties{ + // UpgradeType: to.Ptr(armhdinsightcontainers.ClusterPoolAvailableUpgradeTypeAKSPatchUpgrade), + // CurrentVersion: to.Ptr("1.26.3"), + // CurrentVersionStatus: to.Ptr(armhdinsightcontainers.CurrentClusterPoolAksVersionStatusDeprecated), + // LatestVersion: to.Ptr("1.26.6"), + // }, + // }, + // { + // ID: to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.HDInsight/clusterPools/clusterpool1/availableUpgrades/NodeOsUpgrade"), + // Properties: &armhdinsightcontainers.ClusterPoolAvailableUpgradeNodeOsUpgradeProperties{ + // UpgradeType: to.Ptr(armhdinsightcontainers.ClusterPoolAvailableUpgradeTypeNodeOsUpgrade), + // LatestVersion: to.Ptr("AKSCBLMariner-V2gen2-202310.09.0"), + // }, + // }}, + // } + } +} diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusterpools_client.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusterpools_client.go index 78959ba08f77..601606ec6e98 100644 --- a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusterpools_client.go +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusterpools_client.go @@ -46,7 +46,7 @@ func NewClusterPoolsClient(subscriptionID string, credential azcore.TokenCredent // BeginCreateOrUpdate - Creates or updates a cluster pool. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-06-01-preview +// Generated from API version 2023-11-01-preview // - resourceGroupName - The name of the resource group. 
The name is case insensitive. // - clusterPoolName - The name of the cluster pool. // - clusterPool - The Cluster Pool to create. @@ -73,7 +73,7 @@ func (client *ClusterPoolsClient) BeginCreateOrUpdate(ctx context.Context, resou // CreateOrUpdate - Creates or updates a cluster pool. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-06-01-preview +// Generated from API version 2023-11-01-preview func (client *ClusterPoolsClient) createOrUpdate(ctx context.Context, resourceGroupName string, clusterPoolName string, clusterPool ClusterPool, options *ClusterPoolsClientBeginCreateOrUpdateOptions) (*http.Response, error) { var err error const operationName = "ClusterPoolsClient.BeginCreateOrUpdate" @@ -115,7 +115,7 @@ func (client *ClusterPoolsClient) createOrUpdateCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-06-01-preview") + reqQP.Set("api-version", "2023-11-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, clusterPool); err != nil { @@ -127,7 +127,7 @@ func (client *ClusterPoolsClient) createOrUpdateCreateRequest(ctx context.Contex // BeginDelete - Deletes a Cluster Pool. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-06-01-preview +// Generated from API version 2023-11-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - clusterPoolName - The name of the cluster pool. // - options - ClusterPoolsClientBeginDeleteOptions contains the optional parameters for the ClusterPoolsClient.BeginDelete @@ -152,7 +152,7 @@ func (client *ClusterPoolsClient) BeginDelete(ctx context.Context, resourceGroup // Delete - Deletes a Cluster Pool. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-06-01-preview +// Generated from API version 2023-11-01-preview func (client *ClusterPoolsClient) deleteOperation(ctx context.Context, resourceGroupName string, clusterPoolName string, options *ClusterPoolsClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "ClusterPoolsClient.BeginDelete" @@ -194,7 +194,7 @@ func (client *ClusterPoolsClient) deleteCreateRequest(ctx context.Context, resou return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-06-01-preview") + reqQP.Set("api-version", "2023-11-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -203,7 +203,7 @@ func (client *ClusterPoolsClient) deleteCreateRequest(ctx context.Context, resou // Get - Gets a cluster pool. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-06-01-preview +// Generated from API version 2023-11-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - clusterPoolName - The name of the cluster pool. // - options - ClusterPoolsClientGetOptions contains the optional parameters for the ClusterPoolsClient.Get method. @@ -249,7 +249,7 @@ func (client *ClusterPoolsClient) getCreateRequest(ctx context.Context, resource return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-06-01-preview") + reqQP.Set("api-version", "2023-11-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -266,7 +266,7 @@ func (client *ClusterPoolsClient) getHandleResponse(resp *http.Response) (Cluste // NewListByResourceGroupPager - Lists the HDInsight cluster pools under a resource group. // -// Generated from API version 2023-06-01-preview +// Generated from API version 2023-11-01-preview // - resourceGroupName - The name of the resource group. 
The name is case insensitive. // - options - ClusterPoolsClientListByResourceGroupOptions contains the optional parameters for the ClusterPoolsClient.NewListByResourceGroupPager // method. @@ -309,7 +309,7 @@ func (client *ClusterPoolsClient) listByResourceGroupCreateRequest(ctx context.C return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-06-01-preview") + reqQP.Set("api-version", "2023-11-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -326,7 +326,7 @@ func (client *ClusterPoolsClient) listByResourceGroupHandleResponse(resp *http.R // NewListBySubscriptionPager - Gets the list of Cluster Pools within a Subscription. // -// Generated from API version 2023-06-01-preview +// Generated from API version 2023-11-01-preview // - options - ClusterPoolsClientListBySubscriptionOptions contains the optional parameters for the ClusterPoolsClient.NewListBySubscriptionPager // method. func (client *ClusterPoolsClient) NewListBySubscriptionPager(options *ClusterPoolsClientListBySubscriptionOptions) *runtime.Pager[ClusterPoolsClientListBySubscriptionResponse] { @@ -364,7 +364,7 @@ func (client *ClusterPoolsClient) listBySubscriptionCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-06-01-preview") + reqQP.Set("api-version", "2023-11-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -382,7 +382,7 @@ func (client *ClusterPoolsClient) listBySubscriptionHandleResponse(resp *http.Re // BeginUpdateTags - Updates an existing Cluster Pool Tags. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-06-01-preview +// Generated from API version 2023-11-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. 
// - clusterPoolName - The name of the cluster pool. // - clusterPoolTags - Parameters supplied to update tags. @@ -409,7 +409,7 @@ func (client *ClusterPoolsClient) BeginUpdateTags(ctx context.Context, resourceG // UpdateTags - Updates an existing Cluster Pool Tags. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-06-01-preview +// Generated from API version 2023-11-01-preview func (client *ClusterPoolsClient) updateTags(ctx context.Context, resourceGroupName string, clusterPoolName string, clusterPoolTags TagsObject, options *ClusterPoolsClientBeginUpdateTagsOptions) (*http.Response, error) { var err error const operationName = "ClusterPoolsClient.BeginUpdateTags" @@ -451,7 +451,7 @@ func (client *ClusterPoolsClient) updateTagsCreateRequest(ctx context.Context, r return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-06-01-preview") + reqQP.Set("api-version", "2023-11-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, clusterPoolTags); err != nil { @@ -459,3 +459,84 @@ func (client *ClusterPoolsClient) updateTagsCreateRequest(ctx context.Context, r } return req, nil } + +// BeginUpgrade - Upgrade a cluster pool. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - clusterPoolName - The name of the cluster pool. +// - clusterPoolUpgradeRequest - Upgrade a cluster pool. +// - options - ClusterPoolsClientBeginUpgradeOptions contains the optional parameters for the ClusterPoolsClient.BeginUpgrade +// method. 
+func (client *ClusterPoolsClient) BeginUpgrade(ctx context.Context, resourceGroupName string, clusterPoolName string, clusterPoolUpgradeRequest ClusterPoolUpgrade, options *ClusterPoolsClientBeginUpgradeOptions) (*runtime.Poller[ClusterPoolsClientUpgradeResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.upgrade(ctx, resourceGroupName, clusterPoolName, clusterPoolUpgradeRequest, options) + if err != nil { + return nil, err + } + poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ClusterPoolsClientUpgradeResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), + }) + return poller, err + } else { + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ClusterPoolsClientUpgradeResponse]{ + Tracer: client.internal.Tracer(), + }) + } +} + +// Upgrade - Upgrade a cluster pool. +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2023-11-01-preview +func (client *ClusterPoolsClient) upgrade(ctx context.Context, resourceGroupName string, clusterPoolName string, clusterPoolUpgradeRequest ClusterPoolUpgrade, options *ClusterPoolsClientBeginUpgradeOptions) (*http.Response, error) { + var err error + const operationName = "ClusterPoolsClient.BeginUpgrade" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.upgradeCreateRequest(ctx, resourceGroupName, clusterPoolName, clusterPoolUpgradeRequest, options) + if err != nil { + return nil, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return nil, err + } + return httpResp, nil +} + +// upgradeCreateRequest creates the Upgrade request. 
+func (client *ClusterPoolsClient) upgradeCreateRequest(ctx context.Context, resourceGroupName string, clusterPoolName string, clusterPoolUpgradeRequest ClusterPoolUpgrade, options *ClusterPoolsClientBeginUpgradeOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusterpools/{clusterPoolName}/upgrade" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if clusterPoolName == "" { + return nil, errors.New("parameter clusterPoolName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{clusterPoolName}", url.PathEscape(clusterPoolName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-11-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, clusterPoolUpgradeRequest); err != nil { + return nil, err + } + return req, nil +} diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusterpools_client_example_test.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusterpools_client_example_test.go index a92f31c21b04..ace0713bef65 100644 --- a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusterpools_client_example_test.go +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusterpools_client_example_test.go @@ -18,7 +18,7 @@ import ( 
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers" ) -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/7f70e351393addbc31d790a908c994c7c8644d9c/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-06-01-preview/examples/GetClusterPool.json +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-11-01-preview/examples/GetClusterPool.json func ExampleClusterPoolsClient_Get() { cred, err := azidentity.NewDefaultAzureCredential(nil) if err != nil { @@ -73,8 +73,8 @@ func ExampleClusterPoolsClient_Get() { // } } -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/7f70e351393addbc31d790a908c994c7c8644d9c/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-06-01-preview/examples/CreateClusterPool.json -func ExampleClusterPoolsClient_BeginCreateOrUpdate() { +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-11-01-preview/examples/CreateClusterPool.json +func ExampleClusterPoolsClient_BeginCreateOrUpdate_clusterPoolPut() { cred, err := azidentity.NewDefaultAzureCredential(nil) if err != nil { log.Fatalf("failed to obtain a credential: %v", err) @@ -142,7 +142,161 @@ func ExampleClusterPoolsClient_BeginCreateOrUpdate() { // } } -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/7f70e351393addbc31d790a908c994c7c8644d9c/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-06-01-preview/examples/PatchClusterPool.json +// Generated from 
example definition: https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-11-01-preview/examples/CreateClusterPoolWithPrivateAks.json +func ExampleClusterPoolsClient_BeginCreateOrUpdate_clusterPoolPutWithPrivateAks() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armhdinsightcontainers.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewClusterPoolsClient().BeginCreateOrUpdate(ctx, "hiloResourcegroup", "clusterpool1", armhdinsightcontainers.ClusterPool{ + Location: to.Ptr("West US 2"), + Properties: &armhdinsightcontainers.ClusterPoolResourceProperties{ + ClusterPoolProfile: &armhdinsightcontainers.ClusterPoolResourcePropertiesClusterPoolProfile{ + ClusterPoolVersion: to.Ptr("1.2"), + }, + ComputeProfile: &armhdinsightcontainers.ClusterPoolResourcePropertiesComputeProfile{ + VMSize: to.Ptr("Standard_D3_v2"), + }, + NetworkProfile: &armhdinsightcontainers.ClusterPoolResourcePropertiesNetworkProfile{ + EnablePrivateAPIServer: to.Ptr(true), + SubnetID: to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1"), + }, + }, + }, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + res, err := poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.ClusterPool = armhdinsightcontainers.ClusterPool{ + // Name: to.Ptr("clusterpool1"), + // Type: to.Ptr("Microsoft.HDInsight/clusterPools"), + // ID: to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.HDInsight/clusterPools/clusterpool1"), + // SystemData: &armhdinsightcontainers.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-02-03T01:01:01.107Z"); return t}()), + // CreatedBy: to.Ptr("string"), + // CreatedByType: to.Ptr(armhdinsightcontainers.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-02-04T02:03:01.197Z"); return t}()), + // LastModifiedBy: to.Ptr("string"), + // LastModifiedByType: to.Ptr(armhdinsightcontainers.CreatedByTypeUser), + // }, + // Location: to.Ptr("West US 2"), + // Properties: &armhdinsightcontainers.ClusterPoolResourceProperties{ + // AksClusterProfile: &armhdinsightcontainers.ClusterPoolResourcePropertiesAksClusterProfile{ + // AksClusterAgentPoolIdentityProfile: &armhdinsightcontainers.AksClusterProfileAksClusterAgentPoolIdentityProfile{ + // MsiClientID: to.Ptr("a89fb478-2a84-4d9b-8f18-3e8c4d1db3eb"), + // MsiObjectID: to.Ptr("dc7ef861-8b55-4ffb-9003-20885cd895a9"), + // MsiResourceID: to.Ptr("/subscriptions/subid/resourceGroups/hdi-45cd32aead6e4a91b079a0cdbfac8c36/providers/Microsoft.ManagedIdentity/userAssignedIdentities/clusterpool1-agentpool"), + // }, + // AksClusterResourceID: to.Ptr("/subscriptions/subid/resourceGroups/hdi-45cd32aead6e4a91b079a0cdbfac8c36/providers/Microsoft.ContainerService/managedClusters/clusterpool1"), + // AksVersion: to.Ptr("1.24"), + // }, + // ClusterPoolProfile: &armhdinsightcontainers.ClusterPoolResourcePropertiesClusterPoolProfile{ + // ClusterPoolVersion: to.Ptr("1.2"), + // }, + // ComputeProfile: &armhdinsightcontainers.ClusterPoolResourcePropertiesComputeProfile{ + // Count: to.Ptr[int32](3), + // VMSize: to.Ptr("Standard_D3_v2"), + // }, + // 
DeploymentID: to.Ptr("45cd32aead6e4a91b079a0cdbfac8c36"), + // ManagedResourceGroupName: to.Ptr("hdi-45cd32aead6e4a91b079a0cdbfac8c36"), + // NetworkProfile: &armhdinsightcontainers.ClusterPoolResourcePropertiesNetworkProfile{ + // EnablePrivateAPIServer: to.Ptr(true), + // SubnetID: to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1"), + // }, + // ProvisioningState: to.Ptr(armhdinsightcontainers.ProvisioningStatusSucceeded), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-11-01-preview/examples/CreateClusterPoolWithUDRAks.json +func ExampleClusterPoolsClient_BeginCreateOrUpdate_clusterPoolPutWithUdrAks() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armhdinsightcontainers.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewClusterPoolsClient().BeginCreateOrUpdate(ctx, "hiloResourcegroup", "clusterpool1", armhdinsightcontainers.ClusterPool{ + Location: to.Ptr("West US 2"), + Properties: &armhdinsightcontainers.ClusterPoolResourceProperties{ + ClusterPoolProfile: &armhdinsightcontainers.ClusterPoolResourcePropertiesClusterPoolProfile{ + ClusterPoolVersion: to.Ptr("1.2"), + }, + ComputeProfile: &armhdinsightcontainers.ClusterPoolResourcePropertiesComputeProfile{ + VMSize: to.Ptr("Standard_D3_v2"), + }, + NetworkProfile: &armhdinsightcontainers.ClusterPoolResourcePropertiesNetworkProfile{ + OutboundType: to.Ptr(armhdinsightcontainers.OutboundTypeUserDefinedRouting), + SubnetID: 
to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1"), + }, + }, + }, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + res, err := poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.ClusterPool = armhdinsightcontainers.ClusterPool{ + // Name: to.Ptr("clusterpool1"), + // Type: to.Ptr("Microsoft.HDInsight/clusterPools"), + // ID: to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.HDInsight/clusterPools/clusterpool1"), + // SystemData: &armhdinsightcontainers.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-02-03T01:01:01.107Z"); return t}()), + // CreatedBy: to.Ptr("string"), + // CreatedByType: to.Ptr(armhdinsightcontainers.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-02-04T02:03:01.197Z"); return t}()), + // LastModifiedBy: to.Ptr("string"), + // LastModifiedByType: to.Ptr(armhdinsightcontainers.CreatedByTypeUser), + // }, + // Location: to.Ptr("West US 2"), + // Properties: &armhdinsightcontainers.ClusterPoolResourceProperties{ + // AksClusterProfile: &armhdinsightcontainers.ClusterPoolResourcePropertiesAksClusterProfile{ + // AksClusterAgentPoolIdentityProfile: &armhdinsightcontainers.AksClusterProfileAksClusterAgentPoolIdentityProfile{ + // MsiClientID: to.Ptr("a89fb478-2a84-4d9b-8f18-3e8c4d1db3eb"), + // MsiObjectID: to.Ptr("dc7ef861-8b55-4ffb-9003-20885cd895a9"), + // MsiResourceID: 
to.Ptr("/subscriptions/subid/resourceGroups/hdi-45cd32aead6e4a91b079a0cdbfac8c36/providers/Microsoft.ManagedIdentity/userAssignedIdentities/clusterpool1-agentpool"), + // }, + // AksClusterResourceID: to.Ptr("/subscriptions/subid/resourceGroups/hdi-45cd32aead6e4a91b079a0cdbfac8c36/providers/Microsoft.ContainerService/managedClusters/clusterpool1"), + // AksVersion: to.Ptr("1.24"), + // }, + // ClusterPoolProfile: &armhdinsightcontainers.ClusterPoolResourcePropertiesClusterPoolProfile{ + // ClusterPoolVersion: to.Ptr("1.2"), + // }, + // ComputeProfile: &armhdinsightcontainers.ClusterPoolResourcePropertiesComputeProfile{ + // Count: to.Ptr[int32](3), + // VMSize: to.Ptr("Standard_D3_v2"), + // }, + // DeploymentID: to.Ptr("45cd32aead6e4a91b079a0cdbfac8c36"), + // ManagedResourceGroupName: to.Ptr("hdi-45cd32aead6e4a91b079a0cdbfac8c36"), + // NetworkProfile: &armhdinsightcontainers.ClusterPoolResourcePropertiesNetworkProfile{ + // OutboundType: to.Ptr(armhdinsightcontainers.OutboundTypeUserDefinedRouting), + // SubnetID: to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.Network/virtualNetworks/vnet1/subnets/subnet1"), + // }, + // ProvisioningState: to.Ptr(armhdinsightcontainers.ProvisioningStatusSucceeded), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-11-01-preview/examples/PatchClusterPool.json func ExampleClusterPoolsClient_BeginUpdateTags() { cred, err := azidentity.NewDefaultAzureCredential(nil) if err != nil { @@ -206,7 +360,7 @@ func ExampleClusterPoolsClient_BeginUpdateTags() { // } } -// Generated from example definition: 
https://github.com/Azure/azure-rest-api-specs/blob/7f70e351393addbc31d790a908c994c7c8644d9c/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-06-01-preview/examples/DeleteClusterPool.json +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-11-01-preview/examples/DeleteClusterPool.json func ExampleClusterPoolsClient_BeginDelete() { cred, err := azidentity.NewDefaultAzureCredential(nil) if err != nil { @@ -227,7 +381,7 @@ func ExampleClusterPoolsClient_BeginDelete() { } } -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/7f70e351393addbc31d790a908c994c7c8644d9c/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-06-01-preview/examples/ListClusterPoolsSubscription.json +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-11-01-preview/examples/ListClusterPoolsSubscription.json func ExampleClusterPoolsClient_NewListBySubscriptionPager() { cred, err := azidentity.NewDefaultAzureCredential(nil) if err != nil { @@ -286,7 +440,7 @@ func ExampleClusterPoolsClient_NewListBySubscriptionPager() { } } -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/7f70e351393addbc31d790a908c994c7c8644d9c/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-06-01-preview/examples/ListClusterPools.json +// Generated from example definition: 
https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-11-01-preview/examples/ListClusterPools.json func ExampleClusterPoolsClient_NewListByResourceGroupPager() { cred, err := azidentity.NewDefaultAzureCredential(nil) if err != nil { @@ -344,3 +498,131 @@ func ExampleClusterPoolsClient_NewListByResourceGroupPager() { // } } } + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-11-01-preview/examples/UpgradeAKSPatchVersionForClusterPool.json +func ExampleClusterPoolsClient_BeginUpgrade_clusterPoolsUpgradeAksPatchVersion() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armhdinsightcontainers.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewClusterPoolsClient().BeginUpgrade(ctx, "hiloResourcegroup", "clusterpool1", armhdinsightcontainers.ClusterPoolUpgrade{ + Properties: &armhdinsightcontainers.ClusterPoolAKSPatchVersionUpgradeProperties{ + UpgradeType: to.Ptr(armhdinsightcontainers.ClusterPoolUpgradeTypeAKSPatchUpgrade), + UpgradeAllClusterNodes: to.Ptr(false), + UpgradeClusterPool: to.Ptr(true), + }, + }, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + res, err := poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. 
Please pay attention that all the values in the output are fake values for just demo purposes. + // res.ClusterPool = armhdinsightcontainers.ClusterPool{ + // Name: to.Ptr("clusterpool1"), + // Type: to.Ptr("Microsoft.HDInsight/clusterPools"), + // ID: to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.HDInsight/clusterPools/clusterpool1"), + // SystemData: &armhdinsightcontainers.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-02-03T01:01:01.107Z"); return t}()), + // CreatedBy: to.Ptr("string"), + // CreatedByType: to.Ptr(armhdinsightcontainers.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-02-04T02:03:01.197Z"); return t}()), + // LastModifiedBy: to.Ptr("string"), + // LastModifiedByType: to.Ptr(armhdinsightcontainers.CreatedByTypeUser), + // }, + // Location: to.Ptr("West US 2"), + // Properties: &armhdinsightcontainers.ClusterPoolResourceProperties{ + // AksClusterProfile: &armhdinsightcontainers.ClusterPoolResourcePropertiesAksClusterProfile{ + // AksClusterAgentPoolIdentityProfile: &armhdinsightcontainers.AksClusterProfileAksClusterAgentPoolIdentityProfile{ + // MsiClientID: to.Ptr("a89fb478-2a84-4d9b-8f18-3e8c4d1db3eb"), + // MsiObjectID: to.Ptr("dc7ef861-8b55-4ffb-9003-20885cd895a9"), + // MsiResourceID: to.Ptr("/subscriptions/subid/resourceGroups/hdi-45cd32aead6e4a91b079a0cdbfac8c36/providers/Microsoft.ManagedIdentity/userAssignedIdentities/clusterpool1-agentpool"), + // }, + // AksClusterResourceID: to.Ptr("/subscriptions/subid/resourceGroups/hdi-45cd32aead6e4a91b079a0cdbfac8c36/providers/Microsoft.ContainerService/managedClusters/clusterpool1"), + // AksVersion: to.Ptr("1.24"), + // }, + // ClusterPoolProfile: &armhdinsightcontainers.ClusterPoolResourcePropertiesClusterPoolProfile{ + // ClusterPoolVersion: to.Ptr("1.2"), + // }, + // ComputeProfile: 
&armhdinsightcontainers.ClusterPoolResourcePropertiesComputeProfile{ + // Count: to.Ptr[int32](3), + // VMSize: to.Ptr("Standard_D3_v2"), + // }, + // DeploymentID: to.Ptr("45cd32aead6e4a91b079a0cdbfac8c36"), + // ManagedResourceGroupName: to.Ptr("hdi-45cd32aead6e4a91b079a0cdbfac8c36"), + // ProvisioningState: to.Ptr(armhdinsightcontainers.ProvisioningStatusSucceeded), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-11-01-preview/examples/UpgradeNodeOsForClusterPool.json +func ExampleClusterPoolsClient_BeginUpgrade_clusterPoolsUpgradeNodeOs() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armhdinsightcontainers.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewClusterPoolsClient().BeginUpgrade(ctx, "hiloResourcegroup", "clusterpool1", armhdinsightcontainers.ClusterPoolUpgrade{ + Properties: &armhdinsightcontainers.ClusterPoolNodeOsImageUpdateProperties{ + UpgradeType: to.Ptr(armhdinsightcontainers.ClusterPoolUpgradeTypeNodeOsUpgrade), + }, + }, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + res, err := poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.ClusterPool = armhdinsightcontainers.ClusterPool{ + // Name: to.Ptr("clusterpool1"), + // Type: to.Ptr("Microsoft.HDInsight/clusterPools"), + // ID: to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.HDInsight/clusterPools/clusterpool1"), + // SystemData: &armhdinsightcontainers.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-02-03T01:01:01.107Z"); return t}()), + // CreatedBy: to.Ptr("string"), + // CreatedByType: to.Ptr(armhdinsightcontainers.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-02-04T02:03:01.197Z"); return t}()), + // LastModifiedBy: to.Ptr("string"), + // LastModifiedByType: to.Ptr(armhdinsightcontainers.CreatedByTypeUser), + // }, + // Location: to.Ptr("West US 2"), + // Properties: &armhdinsightcontainers.ClusterPoolResourceProperties{ + // AksClusterProfile: &armhdinsightcontainers.ClusterPoolResourcePropertiesAksClusterProfile{ + // AksClusterAgentPoolIdentityProfile: &armhdinsightcontainers.AksClusterProfileAksClusterAgentPoolIdentityProfile{ + // MsiClientID: to.Ptr("a89fb478-2a84-4d9b-8f18-3e8c4d1db3eb"), + // MsiObjectID: to.Ptr("dc7ef861-8b55-4ffb-9003-20885cd895a9"), + // MsiResourceID: to.Ptr("/subscriptions/subid/resourceGroups/hdi-45cd32aead6e4a91b079a0cdbfac8c36/providers/Microsoft.ManagedIdentity/userAssignedIdentities/clusterpool1-agentpool"), + // }, + // AksClusterResourceID: to.Ptr("/subscriptions/subid/resourceGroups/hdi-45cd32aead6e4a91b079a0cdbfac8c36/providers/Microsoft.ContainerService/managedClusters/clusterpool1"), + // AksVersion: to.Ptr("1.24"), + // }, + // ClusterPoolProfile: &armhdinsightcontainers.ClusterPoolResourcePropertiesClusterPoolProfile{ + // ClusterPoolVersion: to.Ptr("1.2"), + // }, + // ComputeProfile: &armhdinsightcontainers.ClusterPoolResourcePropertiesComputeProfile{ + // Count: to.Ptr[int32](3), + // VMSize: to.Ptr("Standard_D3_v2"), + // }, + // 
DeploymentID: to.Ptr("45cd32aead6e4a91b079a0cdbfac8c36"), + // ManagedResourceGroupName: to.Ptr("hdi-45cd32aead6e4a91b079a0cdbfac8c36"), + // ProvisioningState: to.Ptr(armhdinsightcontainers.ProvisioningStatusSucceeded), + // }, + // } +} diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusters_client.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusters_client.go index 37e33e854659..57888f889889 100644 --- a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusters_client.go +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusters_client.go @@ -46,7 +46,7 @@ func NewClustersClient(subscriptionID string, credential azcore.TokenCredential, // BeginCreate - Creates a cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-06-01-preview +// Generated from API version 2023-11-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - clusterPoolName - The name of the cluster pool. // - clusterName - The name of the HDInsight cluster. @@ -73,7 +73,7 @@ func (client *ClustersClient) BeginCreate(ctx context.Context, resourceGroupName // Create - Creates a cluster. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-06-01-preview +// Generated from API version 2023-11-01-preview func (client *ClustersClient) create(ctx context.Context, resourceGroupName string, clusterPoolName string, clusterName string, hdInsightCluster Cluster, options *ClustersClientBeginCreateOptions) (*http.Response, error) { var err error const operationName = "ClustersClient.BeginCreate" @@ -119,7 +119,7 @@ func (client *ClustersClient) createCreateRequest(ctx context.Context, resourceG return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-06-01-preview") + reqQP.Set("api-version", "2023-11-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, hdInsightCluster); err != nil { @@ -131,7 +131,7 @@ func (client *ClustersClient) createCreateRequest(ctx context.Context, resourceG // BeginDelete - Deletes a cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-06-01-preview +// Generated from API version 2023-11-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - clusterPoolName - The name of the cluster pool. // - clusterName - The name of the HDInsight cluster. @@ -157,7 +157,7 @@ func (client *ClustersClient) BeginDelete(ctx context.Context, resourceGroupName // Delete - Deletes a cluster. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-06-01-preview +// Generated from API version 2023-11-01-preview func (client *ClustersClient) deleteOperation(ctx context.Context, resourceGroupName string, clusterPoolName string, clusterName string, options *ClustersClientBeginDeleteOptions) (*http.Response, error) { var err error const operationName = "ClustersClient.BeginDelete" @@ -203,7 +203,7 @@ func (client *ClustersClient) deleteCreateRequest(ctx context.Context, resourceG return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-06-01-preview") + reqQP.Set("api-version", "2023-11-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -212,7 +212,7 @@ func (client *ClustersClient) deleteCreateRequest(ctx context.Context, resourceG // Get - Gets a HDInsight cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-06-01-preview +// Generated from API version 2023-11-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - clusterPoolName - The name of the cluster pool. // - clusterName - The name of the HDInsight cluster. @@ -263,7 +263,7 @@ func (client *ClustersClient) getCreateRequest(ctx context.Context, resourceGrou return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-06-01-preview") + reqQP.Set("api-version", "2023-11-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -281,7 +281,7 @@ func (client *ClustersClient) getHandleResponse(resp *http.Response) (ClustersCl // GetInstanceView - Gets the status of a cluster instance. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-06-01-preview +// Generated from API version 2023-11-01-preview // - resourceGroupName - The name of the resource group. 
The name is case insensitive. // - clusterPoolName - The name of the cluster pool. // - clusterName - The name of the HDInsight cluster. @@ -333,7 +333,7 @@ func (client *ClustersClient) getInstanceViewCreateRequest(ctx context.Context, return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-06-01-preview") + reqQP.Set("api-version", "2023-11-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -350,7 +350,7 @@ func (client *ClustersClient) getInstanceViewHandleResponse(resp *http.Response) // NewListByClusterPoolNamePager - Lists the HDInsight cluster pools under a resource group. // -// Generated from API version 2023-06-01-preview +// Generated from API version 2023-11-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - clusterPoolName - The name of the cluster pool. // - options - ClustersClientListByClusterPoolNameOptions contains the optional parameters for the ClustersClient.NewListByClusterPoolNamePager @@ -398,7 +398,7 @@ func (client *ClustersClient) listByClusterPoolNameCreateRequest(ctx context.Con return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-06-01-preview") + reqQP.Set("api-version", "2023-11-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -415,7 +415,7 @@ func (client *ClustersClient) listByClusterPoolNameHandleResponse(resp *http.Res // NewListInstanceViewsPager - Lists the lists of instance views // -// Generated from API version 2023-06-01-preview +// Generated from API version 2023-11-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - clusterPoolName - The name of the cluster pool. // - clusterName - The name of the HDInsight cluster. 
@@ -468,7 +468,7 @@ func (client *ClustersClient) listInstanceViewsCreateRequest(ctx context.Context return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-06-01-preview") + reqQP.Set("api-version", "2023-11-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -485,7 +485,7 @@ func (client *ClustersClient) listInstanceViewsHandleResponse(resp *http.Respons // NewListServiceConfigsPager - Lists the config dump of all services running in cluster. // -// Generated from API version 2023-06-01-preview +// Generated from API version 2023-11-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - clusterPoolName - The name of the cluster pool. // - clusterName - The name of the HDInsight cluster. @@ -538,7 +538,7 @@ func (client *ClustersClient) listServiceConfigsCreateRequest(ctx context.Contex return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-06-01-preview") + reqQP.Set("api-version", "2023-11-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil @@ -556,7 +556,7 @@ func (client *ClustersClient) listServiceConfigsHandleResponse(resp *http.Respon // BeginResize - Resize an existing Cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-06-01-preview +// Generated from API version 2023-11-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - clusterPoolName - The name of the cluster pool. // - clusterName - The name of the HDInsight cluster. @@ -583,7 +583,7 @@ func (client *ClustersClient) BeginResize(ctx context.Context, resourceGroupName // Resize - Resize an existing Cluster. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-06-01-preview +// Generated from API version 2023-11-01-preview func (client *ClustersClient) resize(ctx context.Context, resourceGroupName string, clusterPoolName string, clusterName string, clusterResizeRequest ClusterResizeData, options *ClustersClientBeginResizeOptions) (*http.Response, error) { var err error const operationName = "ClustersClient.BeginResize" @@ -629,7 +629,7 @@ func (client *ClustersClient) resizeCreateRequest(ctx context.Context, resourceG return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-06-01-preview") + reqQP.Set("api-version", "2023-11-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, clusterResizeRequest); err != nil { @@ -641,7 +641,7 @@ func (client *ClustersClient) resizeCreateRequest(ctx context.Context, resourceG // BeginUpdate - Updates an existing Cluster. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-06-01-preview +// Generated from API version 2023-11-01-preview // - resourceGroupName - The name of the resource group. The name is case insensitive. // - clusterPoolName - The name of the cluster pool. // - clusterName - The name of the HDInsight cluster. @@ -668,7 +668,7 @@ func (client *ClustersClient) BeginUpdate(ctx context.Context, resourceGroupName // Update - Updates an existing Cluster. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2023-06-01-preview +// Generated from API version 2023-11-01-preview func (client *ClustersClient) update(ctx context.Context, resourceGroupName string, clusterPoolName string, clusterName string, clusterPatchRequest ClusterPatch, options *ClustersClientBeginUpdateOptions) (*http.Response, error) { var err error const operationName = "ClustersClient.BeginUpdate" @@ -714,7 +714,7 @@ func (client *ClustersClient) updateCreateRequest(ctx context.Context, resourceG return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-06-01-preview") + reqQP.Set("api-version", "2023-11-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, clusterPatchRequest); err != nil { @@ -722,3 +722,88 @@ func (client *ClustersClient) updateCreateRequest(ctx context.Context, resourceG } return req, nil } + +// BeginUpgrade - Upgrade a cluster. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-11-01-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - clusterPoolName - The name of the cluster pool. +// - clusterName - The name of the HDInsight cluster. +// - clusterUpgradeRequest - Upgrade a cluster. +// - options - ClustersClientBeginUpgradeOptions contains the optional parameters for the ClustersClient.BeginUpgrade method. 
+func (client *ClustersClient) BeginUpgrade(ctx context.Context, resourceGroupName string, clusterPoolName string, clusterName string, clusterUpgradeRequest ClusterUpgrade, options *ClustersClientBeginUpgradeOptions) (*runtime.Poller[ClustersClientUpgradeResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.upgrade(ctx, resourceGroupName, clusterPoolName, clusterName, clusterUpgradeRequest, options) + if err != nil { + return nil, err + } + poller, err := runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[ClustersClientUpgradeResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + Tracer: client.internal.Tracer(), + }) + return poller, err + } else { + return runtime.NewPollerFromResumeToken(options.ResumeToken, client.internal.Pipeline(), &runtime.NewPollerFromResumeTokenOptions[ClustersClientUpgradeResponse]{ + Tracer: client.internal.Tracer(), + }) + } +} + +// Upgrade - Upgrade a cluster. +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2023-11-01-preview +func (client *ClustersClient) upgrade(ctx context.Context, resourceGroupName string, clusterPoolName string, clusterName string, clusterUpgradeRequest ClusterUpgrade, options *ClustersClientBeginUpgradeOptions) (*http.Response, error) { + var err error + const operationName = "ClustersClient.BeginUpgrade" + ctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, operationName) + ctx, endSpan := runtime.StartSpan(ctx, operationName, client.internal.Tracer(), nil) + defer func() { endSpan(err) }() + req, err := client.upgradeCreateRequest(ctx, resourceGroupName, clusterPoolName, clusterName, clusterUpgradeRequest, options) + if err != nil { + return nil, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return nil, err + } + return httpResp, nil +} + +// upgradeCreateRequest creates the Upgrade request. 
+func (client *ClustersClient) upgradeCreateRequest(ctx context.Context, resourceGroupName string, clusterPoolName string, clusterName string, clusterUpgradeRequest ClusterUpgrade, options *ClustersClientBeginUpgradeOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusterpools/{clusterPoolName}/clusters/{clusterName}/upgrade" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if clusterPoolName == "" { + return nil, errors.New("parameter clusterPoolName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{clusterPoolName}", url.PathEscape(clusterPoolName)) + if clusterName == "" { + return nil, errors.New("parameter clusterName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{clusterName}", url.PathEscape(clusterName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2023-11-01-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if err := runtime.MarshalAsJSON(req, clusterUpgradeRequest); err != nil { + return nil, err + } + return req, nil +} diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusters_client_example_test.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusters_client_example_test.go index 331e8221dbd8..f54bc905c90a 100644 --- 
a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusters_client_example_test.go +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/clusters_client_example_test.go @@ -18,7 +18,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers" ) -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/7f70e351393addbc31d790a908c994c7c8644d9c/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-06-01-preview/examples/ListClustersByClusterPoolName.json +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-11-01-preview/examples/ListClustersByClusterPoolName.json func ExampleClustersClient_NewListByClusterPoolNamePager() { cred, err := azidentity.NewDefaultAzureCredential(nil) if err != nil { @@ -58,11 +58,15 @@ func ExampleClustersClient_NewListByClusterPoolNamePager() { // to.Ptr("testuser1"), // to.Ptr("testuser2")}, // }, - // ClusterVersion: to.Ptr("1.0.1"), + // ClusterVersion: to.Ptr("1.0.6"), // Components: []*armhdinsightcontainers.ClusterComponentsItem{ // { - // Name: to.Ptr("Hive"), - // Version: to.Ptr("2.4.1"), + // Name: to.Ptr("Trino"), + // Version: to.Ptr("410"), + // }, + // { + // Name: to.Ptr("Hive metastore"), + // Version: to.Ptr("3.1.2"), // }}, // ConnectivityProfile: &armhdinsightcontainers.ConnectivityProfile{ // SSH: []*armhdinsightcontainers.SSHConnectivityEndpoint{ @@ -81,21 +85,26 @@ func ExampleClustersClient_NewListByClusterPoolNamePager() { // MsiObjectID: to.Ptr("40491351-c240-4042-91e0-f644a1d2b441"), // MsiResourceID: to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi"), // }, - // KafkaProfile: map[string]any{ - // }, - 
// OssVersion: to.Ptr("2.4.1"), + // OssVersion: to.Ptr("0.410.0"), // SSHProfile: &armhdinsightcontainers.SSHProfile{ // Count: to.Ptr[int32](2), // PodPrefix: to.Ptr("sshnode"), // }, + // TrinoProfile: &armhdinsightcontainers.TrinoProfile{ + // }, // }, - // ClusterType: to.Ptr("kafka"), + // ClusterType: to.Ptr("Trino"), // ComputeProfile: &armhdinsightcontainers.ComputeProfile{ // Nodes: []*armhdinsightcontainers.NodeProfile{ // { - // Type: to.Ptr("worker"), - // Count: to.Ptr[int32](4), - // VMSize: to.Ptr("Standard_D3_v2"), + // Type: to.Ptr("Head"), + // Count: to.Ptr[int32](2), + // VMSize: to.Ptr("Standard_E8as_v5"), + // }, + // { + // Type: to.Ptr("Worker"), + // Count: to.Ptr[int32](3), + // VMSize: to.Ptr("Standard_E8as_v5"), // }}, // }, // DeploymentID: to.Ptr("45cd32aead6e4a91b079a0cdbfac8c36"), @@ -106,8 +115,8 @@ func ExampleClustersClient_NewListByClusterPoolNamePager() { } } -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/7f70e351393addbc31d790a908c994c7c8644d9c/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-06-01-preview/examples/ResizeCluster.json -func ExampleClustersClient_BeginResize() { +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-11-01-preview/examples/UpgradeAKSPatchVersionForCluster.json +func ExampleClustersClient_BeginUpgrade_clustersUpgradeAksPatchVersion() { cred, err := azidentity.NewDefaultAzureCredential(nil) if err != nil { log.Fatalf("failed to obtain a credential: %v", err) @@ -117,10 +126,9 @@ func ExampleClustersClient_BeginResize() { if err != nil { log.Fatalf("failed to create client: %v", err) } - poller, err := clientFactory.NewClustersClient().BeginResize(ctx, "hiloResourcegroup", "clusterpool1", "cluster1", 
armhdinsightcontainers.ClusterResizeData{ - Location: to.Ptr("West US 2"), - Properties: &armhdinsightcontainers.ClusterResizeProperties{ - TargetWorkerNodeCount: to.Ptr[int32](5), + poller, err := clientFactory.NewClustersClient().BeginUpgrade(ctx, "hiloResourcegroup", "clusterpool1", "cluster1", armhdinsightcontainers.ClusterUpgrade{ + Properties: &armhdinsightcontainers.ClusterAKSPatchVersionUpgradeProperties{ + UpgradeType: to.Ptr(armhdinsightcontainers.ClusterUpgradeTypeAKSPatchUpgrade), }, }, nil) if err != nil { @@ -220,15 +228,15 @@ func ExampleClustersClient_BeginResize() { // MsiObjectID: to.Ptr("40491351-c240-4042-91e0-f644a1d2b441"), // MsiResourceID: to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi"), // }, - // KafkaProfile: map[string]any{ - // }, // OssVersion: to.Ptr("2.4.1"), // SSHProfile: &armhdinsightcontainers.SSHProfile{ // Count: to.Ptr[int32](2), // PodPrefix: to.Ptr("sshnode"), // }, + // TrinoProfile: &armhdinsightcontainers.TrinoProfile{ + // }, // }, - // ClusterType: to.Ptr("kafka"), + // ClusterType: to.Ptr("trino"), // ComputeProfile: &armhdinsightcontainers.ComputeProfile{ // Nodes: []*armhdinsightcontainers.NodeProfile{ // { @@ -243,8 +251,8 @@ func ExampleClustersClient_BeginResize() { // } } -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/7f70e351393addbc31d790a908c994c7c8644d9c/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-06-01-preview/examples/GetCluster.json -func ExampleClustersClient_Get() { +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-11-01-preview/examples/UpgradeHotfixForCluster.json +func ExampleClustersClient_BeginUpgrade_clustersUpgradeHotfix() { cred, err := 
azidentity.NewDefaultAzureCredential(nil) if err != nil { log.Fatalf("failed to obtain a credential: %v", err) @@ -254,10 +262,22 @@ func ExampleClustersClient_Get() { if err != nil { log.Fatalf("failed to create client: %v", err) } - res, err := clientFactory.NewClustersClient().Get(ctx, "hiloResourcegroup", "clusterpool1", "cluster1", nil) + poller, err := clientFactory.NewClustersClient().BeginUpgrade(ctx, "hiloResourcegroup", "clusterpool1", "cluster1", armhdinsightcontainers.ClusterUpgrade{ + Properties: &armhdinsightcontainers.ClusterHotfixUpgradeProperties{ + UpgradeType: to.Ptr(armhdinsightcontainers.ClusterUpgradeTypeHotfixUpgrade), + ComponentName: to.Ptr("historyserver"), + TargetBuildNumber: to.Ptr("3"), + TargetClusterVersion: to.Ptr("1.0.6"), + TargetOssVersion: to.Ptr("1.16.0"), + }, + }, nil) if err != nil { log.Fatalf("failed to finish the request: %v", err) } + res, err := poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } // You could use response here. We use blank identifier for just demo purposes. _ = res // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
@@ -281,54 +301,98 @@ func ExampleClustersClient_Get() { // to.Ptr("testuser1"), // to.Ptr("testuser2")}, // }, - // ClusterVersion: to.Ptr("1.0.1"), - // Components: []*armhdinsightcontainers.ClusterComponentsItem{ - // { - // Name: to.Ptr("Hive"), - // Version: to.Ptr("2.4.1"), - // }}, - // ConnectivityProfile: &armhdinsightcontainers.ConnectivityProfile{ - // SSH: []*armhdinsightcontainers.SSHConnectivityEndpoint{ - // { - // Endpoint: to.Ptr("cluster1.clusterpool1.westus2.projecthilo.net/ssh/host/sshnode-0"), + // AutoscaleProfile: &armhdinsightcontainers.AutoscaleProfile{ + // AutoscaleType: to.Ptr(armhdinsightcontainers.AutoscaleTypeScheduleBased), + // Enabled: to.Ptr(true), + // GracefulDecommissionTimeout: to.Ptr[int32](3600), + // LoadBasedConfig: &armhdinsightcontainers.LoadBasedConfig{ + // CooldownPeriod: to.Ptr[int32](300), + // MaxNodes: to.Ptr[int32](20), + // MinNodes: to.Ptr[int32](10), + // PollInterval: to.Ptr[int32](60), + // ScalingRules: []*armhdinsightcontainers.ScalingRule{ + // { + // ActionType: to.Ptr(armhdinsightcontainers.ScaleActionTypeScaleup), + // ComparisonRule: &armhdinsightcontainers.ComparisonRule{ + // Operator: to.Ptr(armhdinsightcontainers.ComparisonOperatorGreaterThan), + // Threshold: to.Ptr[float32](90), + // }, + // EvaluationCount: to.Ptr[int32](3), + // ScalingMetric: to.Ptr("cpu"), + // }, + // { + // ActionType: to.Ptr(armhdinsightcontainers.ScaleActionTypeScaledown), + // ComparisonRule: &armhdinsightcontainers.ComparisonRule{ + // Operator: to.Ptr(armhdinsightcontainers.ComparisonOperatorLessThan), + // Threshold: to.Ptr[float32](20), + // }, + // EvaluationCount: to.Ptr[int32](3), + // ScalingMetric: to.Ptr("cpu"), + // }}, + // }, + // ScheduleBasedConfig: &armhdinsightcontainers.ScheduleBasedConfig{ + // DefaultCount: to.Ptr[int32](10), + // Schedules: []*armhdinsightcontainers.Schedule{ + // { + // Count: to.Ptr[int32](20), + // Days: []*armhdinsightcontainers.ScheduleDay{ + // 
to.Ptr(armhdinsightcontainers.ScheduleDayMonday)}, + // EndTime: to.Ptr("12:00"), + // StartTime: to.Ptr("00:00"), + // }, + // { + // Count: to.Ptr[int32](25), + // Days: []*armhdinsightcontainers.ScheduleDay{ + // to.Ptr(armhdinsightcontainers.ScheduleDaySunday)}, + // EndTime: to.Ptr("12:00"), + // StartTime: to.Ptr("00:00"), + // }}, + // TimeZone: to.Ptr("Cen. Australia Standard Time"), + // }, + // }, + // ClusterVersion: to.Ptr("1.0.1"), + // ConnectivityProfile: &armhdinsightcontainers.ConnectivityProfile{ + // SSH: []*armhdinsightcontainers.SSHConnectivityEndpoint{ + // { + // Endpoint: to.Ptr("cluster1.clusterpool1.westus2.projecthilo.net/ssh/host/sshnode-0"), + // }, + // { + // Endpoint: to.Ptr("cluster1.clusterpool1.westus2.projecthilo.net/ssh/host/sshnode-1"), + // }}, + // Web: &armhdinsightcontainers.ConnectivityProfileWeb{ + // Fqdn: to.Ptr("cluster1.clusterpool1.westus2.projecthilo.net"), + // }, + // }, + // IdentityProfile: &armhdinsightcontainers.IdentityProfile{ + // MsiClientID: to.Ptr("de91f1d8-767f-460a-ac11-3cf103f74b34"), + // MsiObjectID: to.Ptr("40491351-c240-4042-91e0-f644a1d2b441"), + // MsiResourceID: to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi"), + // }, + // OssVersion: to.Ptr("2.4.1"), + // SSHProfile: &armhdinsightcontainers.SSHProfile{ + // Count: to.Ptr[int32](2), + // PodPrefix: to.Ptr("sshnode"), + // }, + // TrinoProfile: &armhdinsightcontainers.TrinoProfile{ // }, - // { - // Endpoint: to.Ptr("cluster1.clusterpool1.westus2.projecthilo.net/ssh/host/sshnode-1"), - // }}, - // Web: &armhdinsightcontainers.ConnectivityProfileWeb{ - // Fqdn: to.Ptr("cluster1.clusterpool1.westus2.projecthilo.net"), // }, + // ClusterType: to.Ptr("trino"), + // ComputeProfile: &armhdinsightcontainers.ComputeProfile{ + // Nodes: []*armhdinsightcontainers.NodeProfile{ + // { + // Type: to.Ptr("worker"), + // Count: to.Ptr[int32](3), + // VMSize: 
to.Ptr("Standard_D3_v2"), + // }}, + // }, + // DeploymentID: to.Ptr("45cd32aead6e4a91b079a0cdbfac8c36"), + // ProvisioningState: to.Ptr(armhdinsightcontainers.ProvisioningStatusSucceeded), // }, - // IdentityProfile: &armhdinsightcontainers.IdentityProfile{ - // MsiClientID: to.Ptr("de91f1d8-767f-460a-ac11-3cf103f74b34"), - // MsiObjectID: to.Ptr("40491351-c240-4042-91e0-f644a1d2b441"), - // MsiResourceID: to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi"), - // }, - // KafkaProfile: map[string]any{ - // }, - // OssVersion: to.Ptr("2.4.1"), - // SSHProfile: &armhdinsightcontainers.SSHProfile{ - // Count: to.Ptr[int32](2), - // PodPrefix: to.Ptr("sshnode"), - // }, - // }, - // ClusterType: to.Ptr("kafka"), - // ComputeProfile: &armhdinsightcontainers.ComputeProfile{ - // Nodes: []*armhdinsightcontainers.NodeProfile{ - // { - // Type: to.Ptr("worker"), - // Count: to.Ptr[int32](4), - // VMSize: to.Ptr("Standard_D3_v2"), - // }}, - // }, - // DeploymentID: to.Ptr("45cd32aead6e4a91b079a0cdbfac8c36"), - // ProvisioningState: to.Ptr(armhdinsightcontainers.ProvisioningStatusSucceeded), - // }, - // } + // } } -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/7f70e351393addbc31d790a908c994c7c8644d9c/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-06-01-preview/examples/CreateAutoscaleCluster.json -func ExampleClustersClient_BeginCreate_hdInsightClusterPut() { +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-11-01-preview/examples/ResizeCluster.json +func ExampleClustersClient_BeginResize() { cred, err := azidentity.NewDefaultAzureCredential(nil) if err != nil { log.Fatalf("failed to obtain a credential: %v", err) @@ -338,77 
+402,687 @@ func ExampleClustersClient_BeginCreate_hdInsightClusterPut() { if err != nil { log.Fatalf("failed to create client: %v", err) } - poller, err := clientFactory.NewClustersClient().BeginCreate(ctx, "hiloResourcegroup", "clusterpool1", "cluster1", armhdinsightcontainers.Cluster{ + poller, err := clientFactory.NewClustersClient().BeginResize(ctx, "hiloResourcegroup", "clusterpool1", "cluster1", armhdinsightcontainers.ClusterResizeData{ Location: to.Ptr("West US 2"), - Properties: &armhdinsightcontainers.ClusterResourceProperties{ - ClusterProfile: &armhdinsightcontainers.ClusterProfile{ - AuthorizationProfile: &armhdinsightcontainers.AuthorizationProfile{ - UserIDs: []*string{ - to.Ptr("testuser1"), - to.Ptr("testuser2")}, - }, - AutoscaleProfile: &armhdinsightcontainers.AutoscaleProfile{ - AutoscaleType: to.Ptr(armhdinsightcontainers.AutoscaleTypeScheduleBased), - Enabled: to.Ptr(true), - GracefulDecommissionTimeout: to.Ptr[int32](3600), - LoadBasedConfig: &armhdinsightcontainers.LoadBasedConfig{ - CooldownPeriod: to.Ptr[int32](300), - MaxNodes: to.Ptr[int32](20), - MinNodes: to.Ptr[int32](10), - PollInterval: to.Ptr[int32](60), - ScalingRules: []*armhdinsightcontainers.ScalingRule{ - { - ActionType: to.Ptr(armhdinsightcontainers.ScaleActionTypeScaleup), - ComparisonRule: &armhdinsightcontainers.ComparisonRule{ - Operator: to.Ptr(armhdinsightcontainers.ComparisonOperatorGreaterThan), - Threshold: to.Ptr[float32](90), - }, - EvaluationCount: to.Ptr[int32](3), - ScalingMetric: to.Ptr("cpu"), - }, - { - ActionType: to.Ptr(armhdinsightcontainers.ScaleActionTypeScaledown), - ComparisonRule: &armhdinsightcontainers.ComparisonRule{ - Operator: to.Ptr(armhdinsightcontainers.ComparisonOperatorLessThan), - Threshold: to.Ptr[float32](20), - }, - EvaluationCount: to.Ptr[int32](3), - ScalingMetric: to.Ptr("cpu"), - }}, - }, - ScheduleBasedConfig: &armhdinsightcontainers.ScheduleBasedConfig{ - DefaultCount: to.Ptr[int32](10), - Schedules: 
[]*armhdinsightcontainers.Schedule{ - { - Count: to.Ptr[int32](20), - Days: []*armhdinsightcontainers.ScheduleDay{ - to.Ptr(armhdinsightcontainers.ScheduleDayMonday)}, - EndTime: to.Ptr("12:00"), - StartTime: to.Ptr("00:00"), - }, - { - Count: to.Ptr[int32](25), - Days: []*armhdinsightcontainers.ScheduleDay{ - to.Ptr(armhdinsightcontainers.ScheduleDaySunday)}, - EndTime: to.Ptr("12:00"), - StartTime: to.Ptr("00:00"), - }}, - TimeZone: to.Ptr("Cen. Australia Standard Time"), - }, - }, - ClusterVersion: to.Ptr("1.0.1"), - IdentityProfile: &armhdinsightcontainers.IdentityProfile{ - MsiClientID: to.Ptr("de91f1d8-767f-460a-ac11-3cf103f74b34"), - MsiObjectID: to.Ptr("40491351-c240-4042-91e0-f644a1d2b441"), - MsiResourceID: to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi"), - }, - KafkaProfile: map[string]any{}, - OssVersion: to.Ptr("2.4.1"), + Properties: &armhdinsightcontainers.ClusterResizeProperties{ + TargetWorkerNodeCount: to.Ptr[int32](5), + }, + }, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + res, err := poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.Cluster = armhdinsightcontainers.Cluster{ + // Name: to.Ptr("cluster1"), + // Type: to.Ptr("Microsoft.HDInsight/clusterPools/clusters"), + // ID: to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.HDInsight/clusterPools/clusterpool1/clusters/cluster1"), + // SystemData: &armhdinsightcontainers.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-02-03T01:01:01.107Z"); return t}()), + // CreatedBy: to.Ptr("string"), + // CreatedByType: to.Ptr(armhdinsightcontainers.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-02-04T02:03:01.197Z"); return t}()), + // LastModifiedBy: to.Ptr("string"), + // LastModifiedByType: to.Ptr(armhdinsightcontainers.CreatedByTypeUser), + // }, + // Location: to.Ptr("West US 2"), + // Properties: &armhdinsightcontainers.ClusterResourceProperties{ + // ClusterProfile: &armhdinsightcontainers.ClusterProfile{ + // AuthorizationProfile: &armhdinsightcontainers.AuthorizationProfile{ + // UserIDs: []*string{ + // to.Ptr("testuser1"), + // to.Ptr("testuser2")}, + // }, + // AutoscaleProfile: &armhdinsightcontainers.AutoscaleProfile{ + // AutoscaleType: to.Ptr(armhdinsightcontainers.AutoscaleTypeScheduleBased), + // Enabled: to.Ptr(true), + // GracefulDecommissionTimeout: to.Ptr[int32](3600), + // LoadBasedConfig: &armhdinsightcontainers.LoadBasedConfig{ + // CooldownPeriod: to.Ptr[int32](300), + // MaxNodes: to.Ptr[int32](20), + // MinNodes: to.Ptr[int32](10), + // PollInterval: to.Ptr[int32](60), + // ScalingRules: []*armhdinsightcontainers.ScalingRule{ + // { + // ActionType: to.Ptr(armhdinsightcontainers.ScaleActionTypeScaleup), + // ComparisonRule: &armhdinsightcontainers.ComparisonRule{ + // Operator: to.Ptr(armhdinsightcontainers.ComparisonOperatorGreaterThan), + // Threshold: to.Ptr[float32](90), + // }, + // EvaluationCount: to.Ptr[int32](3), + // ScalingMetric: to.Ptr("cpu"), + // }, + // { + // 
ActionType: to.Ptr(armhdinsightcontainers.ScaleActionTypeScaledown), + // ComparisonRule: &armhdinsightcontainers.ComparisonRule{ + // Operator: to.Ptr(armhdinsightcontainers.ComparisonOperatorLessThan), + // Threshold: to.Ptr[float32](20), + // }, + // EvaluationCount: to.Ptr[int32](3), + // ScalingMetric: to.Ptr("cpu"), + // }}, + // }, + // ScheduleBasedConfig: &armhdinsightcontainers.ScheduleBasedConfig{ + // DefaultCount: to.Ptr[int32](10), + // Schedules: []*armhdinsightcontainers.Schedule{ + // { + // Count: to.Ptr[int32](20), + // Days: []*armhdinsightcontainers.ScheduleDay{ + // to.Ptr(armhdinsightcontainers.ScheduleDayMonday)}, + // EndTime: to.Ptr("12:00"), + // StartTime: to.Ptr("00:00"), + // }, + // { + // Count: to.Ptr[int32](25), + // Days: []*armhdinsightcontainers.ScheduleDay{ + // to.Ptr(armhdinsightcontainers.ScheduleDaySunday)}, + // EndTime: to.Ptr("12:00"), + // StartTime: to.Ptr("00:00"), + // }}, + // TimeZone: to.Ptr("Cen. Australia Standard Time"), + // }, + // }, + // ClusterVersion: to.Ptr("1.0.6"), + // ConnectivityProfile: &armhdinsightcontainers.ConnectivityProfile{ + // SSH: []*armhdinsightcontainers.SSHConnectivityEndpoint{ + // { + // Endpoint: to.Ptr("cluster1.clusterpool1.westus2.projecthilo.net/ssh/host/sshnode-0"), + // }, + // { + // Endpoint: to.Ptr("cluster1.clusterpool1.westus2.projecthilo.net/ssh/host/sshnode-1"), + // }}, + // Web: &armhdinsightcontainers.ConnectivityProfileWeb{ + // Fqdn: to.Ptr("cluster1.clusterpool1.westus2.projecthilo.net"), + // }, + // }, + // IdentityProfile: &armhdinsightcontainers.IdentityProfile{ + // MsiClientID: to.Ptr("de91f1d8-767f-460a-ac11-3cf103f74b34"), + // MsiObjectID: to.Ptr("40491351-c240-4042-91e0-f644a1d2b441"), + // MsiResourceID: to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi"), + // }, + // OssVersion: to.Ptr("0.410.0"), + // SSHProfile: &armhdinsightcontainers.SSHProfile{ + // Count: 
to.Ptr[int32](2), + // PodPrefix: to.Ptr("sshnode"), + // }, + // TrinoProfile: &armhdinsightcontainers.TrinoProfile{ + // }, + // }, + // ClusterType: to.Ptr("Trino"), + // ComputeProfile: &armhdinsightcontainers.ComputeProfile{ + // Nodes: []*armhdinsightcontainers.NodeProfile{ + // { + // Type: to.Ptr("Head"), + // Count: to.Ptr[int32](2), + // VMSize: to.Ptr("Standard_E8as_v5"), + // }, + // { + // Type: to.Ptr("Worker"), + // Count: to.Ptr[int32](5), + // VMSize: to.Ptr("Standard_E8as_v5"), + // }}, + // }, + // DeploymentID: to.Ptr("45cd32aead6e4a91b079a0cdbfac8c36"), + // ProvisioningState: to.Ptr(armhdinsightcontainers.ProvisioningStatusSucceeded), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-11-01-preview/examples/GetCluster.json +func ExampleClustersClient_Get() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armhdinsightcontainers.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewClustersClient().Get(ctx, "hiloResourcegroup", "clusterpool1", "cluster1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.Cluster = armhdinsightcontainers.Cluster{ + // Name: to.Ptr("cluster1"), + // Type: to.Ptr("Microsoft.HDInsight/clusterPools/clusters"), + // ID: to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.HDInsight/clusterPools/clusterpool1/clusters/cluster1"), + // SystemData: &armhdinsightcontainers.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-02-03T01:01:01.107Z"); return t}()), + // CreatedBy: to.Ptr("string"), + // CreatedByType: to.Ptr(armhdinsightcontainers.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-02-04T02:03:01.197Z"); return t}()), + // LastModifiedBy: to.Ptr("string"), + // LastModifiedByType: to.Ptr(armhdinsightcontainers.CreatedByTypeUser), + // }, + // Location: to.Ptr("West US 2"), + // Properties: &armhdinsightcontainers.ClusterResourceProperties{ + // ClusterProfile: &armhdinsightcontainers.ClusterProfile{ + // AuthorizationProfile: &armhdinsightcontainers.AuthorizationProfile{ + // UserIDs: []*string{ + // to.Ptr("testuser1"), + // to.Ptr("testuser2")}, + // }, + // ClusterVersion: to.Ptr("1.0.6"), + // Components: []*armhdinsightcontainers.ClusterComponentsItem{ + // { + // Name: to.Ptr("Trino"), + // Version: to.Ptr("410"), + // }, + // { + // Name: to.Ptr("Hive metastore"), + // Version: to.Ptr("3.1.2"), + // }}, + // ConnectivityProfile: &armhdinsightcontainers.ConnectivityProfile{ + // SSH: []*armhdinsightcontainers.SSHConnectivityEndpoint{ + // { + // Endpoint: to.Ptr("cluster1.clusterpool1.westus2.projecthilo.net/ssh/host/sshnode-0"), + // }, + // { + // Endpoint: to.Ptr("cluster1.clusterpool1.westus2.projecthilo.net/ssh/host/sshnode-1"), + // }}, + // Web: &armhdinsightcontainers.ConnectivityProfileWeb{ + // Fqdn: to.Ptr("cluster1.clusterpool1.westus2.projecthilo.net"), + // }, + // }, + // IdentityProfile: &armhdinsightcontainers.IdentityProfile{ + // MsiClientID: 
to.Ptr("de91f1d8-767f-460a-ac11-3cf103f74b34"), + // MsiObjectID: to.Ptr("40491351-c240-4042-91e0-f644a1d2b441"), + // MsiResourceID: to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi"), + // }, + // OssVersion: to.Ptr("0.410.0"), + // SSHProfile: &armhdinsightcontainers.SSHProfile{ + // Count: to.Ptr[int32](2), + // PodPrefix: to.Ptr("sshnode"), + // }, + // TrinoProfile: &armhdinsightcontainers.TrinoProfile{ + // }, + // }, + // ClusterType: to.Ptr("Trino"), + // ComputeProfile: &armhdinsightcontainers.ComputeProfile{ + // Nodes: []*armhdinsightcontainers.NodeProfile{ + // { + // Type: to.Ptr("Head"), + // Count: to.Ptr[int32](2), + // VMSize: to.Ptr("Standard_E8as_v5"), + // }, + // { + // Type: to.Ptr("Worker"), + // Count: to.Ptr[int32](3), + // VMSize: to.Ptr("Standard_E8as_v5"), + // }}, + // }, + // DeploymentID: to.Ptr("45cd32aead6e4a91b079a0cdbfac8c36"), + // ProvisioningState: to.Ptr(armhdinsightcontainers.ProvisioningStatusSucceeded), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-11-01-preview/examples/CreateAutoscaleCluster.json +func ExampleClustersClient_BeginCreate_hdInsightClusterPut() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armhdinsightcontainers.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewClustersClient().BeginCreate(ctx, "hiloResourcegroup", "clusterpool1", "cluster1", armhdinsightcontainers.Cluster{ + Location: to.Ptr("West US 2"), + Properties: &armhdinsightcontainers.ClusterResourceProperties{ + ClusterProfile: 
&armhdinsightcontainers.ClusterProfile{ + AuthorizationProfile: &armhdinsightcontainers.AuthorizationProfile{ + UserIDs: []*string{ + to.Ptr("testuser1"), + to.Ptr("testuser2")}, + }, + AutoscaleProfile: &armhdinsightcontainers.AutoscaleProfile{ + AutoscaleType: to.Ptr(armhdinsightcontainers.AutoscaleTypeScheduleBased), + Enabled: to.Ptr(true), + GracefulDecommissionTimeout: to.Ptr[int32](3600), + LoadBasedConfig: &armhdinsightcontainers.LoadBasedConfig{ + CooldownPeriod: to.Ptr[int32](300), + MaxNodes: to.Ptr[int32](20), + MinNodes: to.Ptr[int32](10), + PollInterval: to.Ptr[int32](60), + ScalingRules: []*armhdinsightcontainers.ScalingRule{ + { + ActionType: to.Ptr(armhdinsightcontainers.ScaleActionTypeScaleup), + ComparisonRule: &armhdinsightcontainers.ComparisonRule{ + Operator: to.Ptr(armhdinsightcontainers.ComparisonOperatorGreaterThan), + Threshold: to.Ptr[float32](90), + }, + EvaluationCount: to.Ptr[int32](3), + ScalingMetric: to.Ptr("cpu"), + }, + { + ActionType: to.Ptr(armhdinsightcontainers.ScaleActionTypeScaledown), + ComparisonRule: &armhdinsightcontainers.ComparisonRule{ + Operator: to.Ptr(armhdinsightcontainers.ComparisonOperatorLessThan), + Threshold: to.Ptr[float32](20), + }, + EvaluationCount: to.Ptr[int32](3), + ScalingMetric: to.Ptr("cpu"), + }}, + }, + ScheduleBasedConfig: &armhdinsightcontainers.ScheduleBasedConfig{ + DefaultCount: to.Ptr[int32](10), + Schedules: []*armhdinsightcontainers.Schedule{ + { + Count: to.Ptr[int32](20), + Days: []*armhdinsightcontainers.ScheduleDay{ + to.Ptr(armhdinsightcontainers.ScheduleDayMonday)}, + EndTime: to.Ptr("12:00"), + StartTime: to.Ptr("00:00"), + }, + { + Count: to.Ptr[int32](25), + Days: []*armhdinsightcontainers.ScheduleDay{ + to.Ptr(armhdinsightcontainers.ScheduleDaySunday)}, + EndTime: to.Ptr("12:00"), + StartTime: to.Ptr("00:00"), + }}, + TimeZone: to.Ptr("Cen. 
Australia Standard Time"), + }, + }, + ClusterVersion: to.Ptr("1.0.6"), + IdentityProfile: &armhdinsightcontainers.IdentityProfile{ + MsiClientID: to.Ptr("de91f1d8-767f-460a-ac11-3cf103f74b34"), + MsiObjectID: to.Ptr("40491351-c240-4042-91e0-f644a1d2b441"), + MsiResourceID: to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi"), + }, + OssVersion: to.Ptr("0.410.0"), + SSHProfile: &armhdinsightcontainers.SSHProfile{ + Count: to.Ptr[int32](2), + }, + TrinoProfile: &armhdinsightcontainers.TrinoProfile{}, + }, + ClusterType: to.Ptr("Trino"), + ComputeProfile: &armhdinsightcontainers.ComputeProfile{ + Nodes: []*armhdinsightcontainers.NodeProfile{ + { + Type: to.Ptr("Head"), + Count: to.Ptr[int32](2), + VMSize: to.Ptr("Standard_E8as_v5"), + }, + { + Type: to.Ptr("Worker"), + Count: to.Ptr[int32](3), + VMSize: to.Ptr("Standard_E8as_v5"), + }}, + }, + }, + }, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + res, err := poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.Cluster = armhdinsightcontainers.Cluster{ + // Name: to.Ptr("cluster1"), + // Type: to.Ptr("Microsoft.HDInsight/clusterPools/clusters"), + // ID: to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.HDInsight/clusterPools/clusterpool1/clusters/cluster1"), + // SystemData: &armhdinsightcontainers.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-02-03T01:01:01.107Z"); return t}()), + // CreatedBy: to.Ptr("string"), + // CreatedByType: to.Ptr(armhdinsightcontainers.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-02-04T02:03:01.197Z"); return t}()), + // LastModifiedBy: to.Ptr("string"), + // LastModifiedByType: to.Ptr(armhdinsightcontainers.CreatedByTypeUser), + // }, + // Location: to.Ptr("West US 2"), + // Properties: &armhdinsightcontainers.ClusterResourceProperties{ + // ClusterProfile: &armhdinsightcontainers.ClusterProfile{ + // AuthorizationProfile: &armhdinsightcontainers.AuthorizationProfile{ + // UserIDs: []*string{ + // to.Ptr("testuser1"), + // to.Ptr("testuser2")}, + // }, + // AutoscaleProfile: &armhdinsightcontainers.AutoscaleProfile{ + // AutoscaleType: to.Ptr(armhdinsightcontainers.AutoscaleTypeScheduleBased), + // Enabled: to.Ptr(true), + // GracefulDecommissionTimeout: to.Ptr[int32](3600), + // LoadBasedConfig: &armhdinsightcontainers.LoadBasedConfig{ + // CooldownPeriod: to.Ptr[int32](300), + // MaxNodes: to.Ptr[int32](20), + // MinNodes: to.Ptr[int32](10), + // PollInterval: to.Ptr[int32](60), + // ScalingRules: []*armhdinsightcontainers.ScalingRule{ + // { + // ActionType: to.Ptr(armhdinsightcontainers.ScaleActionTypeScaleup), + // ComparisonRule: &armhdinsightcontainers.ComparisonRule{ + // Operator: to.Ptr(armhdinsightcontainers.ComparisonOperatorGreaterThan), + // Threshold: to.Ptr[float32](90), + // }, + // EvaluationCount: to.Ptr[int32](3), + // ScalingMetric: to.Ptr("cpu"), + // }, + // { + // 
ActionType: to.Ptr(armhdinsightcontainers.ScaleActionTypeScaledown), + // ComparisonRule: &armhdinsightcontainers.ComparisonRule{ + // Operator: to.Ptr(armhdinsightcontainers.ComparisonOperatorLessThan), + // Threshold: to.Ptr[float32](20), + // }, + // EvaluationCount: to.Ptr[int32](3), + // ScalingMetric: to.Ptr("cpu"), + // }}, + // }, + // ScheduleBasedConfig: &armhdinsightcontainers.ScheduleBasedConfig{ + // DefaultCount: to.Ptr[int32](10), + // Schedules: []*armhdinsightcontainers.Schedule{ + // { + // Count: to.Ptr[int32](20), + // Days: []*armhdinsightcontainers.ScheduleDay{ + // to.Ptr(armhdinsightcontainers.ScheduleDayMonday)}, + // EndTime: to.Ptr("12:00"), + // StartTime: to.Ptr("00:00"), + // }, + // { + // Count: to.Ptr[int32](25), + // Days: []*armhdinsightcontainers.ScheduleDay{ + // to.Ptr(armhdinsightcontainers.ScheduleDaySunday)}, + // EndTime: to.Ptr("12:00"), + // StartTime: to.Ptr("00:00"), + // }}, + // TimeZone: to.Ptr("Cen. Australia Standard Time"), + // }, + // }, + // ClusterVersion: to.Ptr("1.0.6"), + // Components: []*armhdinsightcontainers.ClusterComponentsItem{ + // { + // Name: to.Ptr("Trino"), + // Version: to.Ptr("410"), + // }, + // { + // Name: to.Ptr("Hive metastore"), + // Version: to.Ptr("3.1.2"), + // }}, + // ConnectivityProfile: &armhdinsightcontainers.ConnectivityProfile{ + // SSH: []*armhdinsightcontainers.SSHConnectivityEndpoint{ + // { + // Endpoint: to.Ptr("cluster1.clusterpool1.westus2.projecthilo.net/ssh/host/sshnode-0"), + // }, + // { + // Endpoint: to.Ptr("cluster1.clusterpool1.westus2.projecthilo.net/ssh/host/sshnode-1"), + // }}, + // Web: &armhdinsightcontainers.ConnectivityProfileWeb{ + // Fqdn: to.Ptr("cluster1.clusterpool1.westus2.projecthilo.net"), + // }, + // }, + // IdentityProfile: &armhdinsightcontainers.IdentityProfile{ + // MsiClientID: to.Ptr("de91f1d8-767f-460a-ac11-3cf103f74b34"), + // MsiObjectID: to.Ptr("40491351-c240-4042-91e0-f644a1d2b441"), + // MsiResourceID: 
to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi"), + // }, + // OssVersion: to.Ptr("0.410.0"), + // SSHProfile: &armhdinsightcontainers.SSHProfile{ + // Count: to.Ptr[int32](2), + // PodPrefix: to.Ptr("sshnode"), + // }, + // TrinoProfile: &armhdinsightcontainers.TrinoProfile{ + // }, + // }, + // ClusterType: to.Ptr("Trino"), + // ComputeProfile: &armhdinsightcontainers.ComputeProfile{ + // Nodes: []*armhdinsightcontainers.NodeProfile{ + // { + // Type: to.Ptr("Head"), + // Count: to.Ptr[int32](2), + // VMSize: to.Ptr("Standard_E8as_v5"), + // }, + // { + // Type: to.Ptr("Worker"), + // Count: to.Ptr[int32](3), + // VMSize: to.Ptr("Standard_E8as_v5"), + // }}, + // }, + // DeploymentID: to.Ptr("45cd32aead6e4a91b079a0cdbfac8c36"), + // ProvisioningState: to.Ptr(armhdinsightcontainers.ProvisioningStatusSucceeded), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-11-01-preview/examples/CreateRangerCluster.json +func ExampleClustersClient_BeginCreate_hdInsightRangerClusterPut() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armhdinsightcontainers.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewClustersClient().BeginCreate(ctx, "hiloResourcegroup", "clusterpool1", "cluster1", armhdinsightcontainers.Cluster{ + Location: to.Ptr("West US 2"), + Properties: &armhdinsightcontainers.ClusterResourceProperties{ + ClusterProfile: &armhdinsightcontainers.ClusterProfile{ + AuthorizationProfile: &armhdinsightcontainers.AuthorizationProfile{ + UserIDs: []*string{ + 
to.Ptr("testuser1"), + to.Ptr("testuser2")}, + }, + ClusterVersion: to.Ptr("0.0.1"), + IdentityProfile: &armhdinsightcontainers.IdentityProfile{ + MsiClientID: to.Ptr("de91f1d8-767f-460a-ac11-3cf103f74b34"), + MsiObjectID: to.Ptr("40491351-c240-4042-91e0-f644a1d2b441"), + MsiResourceID: to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi"), + }, + OssVersion: to.Ptr("2.2.3"), + RangerProfile: &armhdinsightcontainers.RangerProfile{ + RangerAdmin: &armhdinsightcontainers.RangerAdminSpec{ + Admins: []*string{ + to.Ptr("testuser1@contoso.com"), + to.Ptr("testuser2@contoso.com")}, + Database: &armhdinsightcontainers.RangerAdminSpecDatabase{ + Name: to.Ptr("testdb"), + Host: to.Ptr("testsqlserver.database.windows.net"), + PasswordSecretRef: to.Ptr("https://testkv.vault.azure.net/secrets/mysecret/5df6584d9c25418c8d900240aa6c3452"), + Username: to.Ptr("admin"), + }, + }, + RangerAudit: &armhdinsightcontainers.RangerAuditSpec{ + StorageAccount: to.Ptr("https://teststorage.blob.core.windows.net/testblob"), + }, + RangerUsersync: &armhdinsightcontainers.RangerUsersyncSpec{ + Enabled: to.Ptr(true), + Groups: []*string{ + to.Ptr("0a53828f-36c9-44c3-be3d-99a7fce977ad"), + to.Ptr("13be6971-79db-4f33-9d41-b25589ca25ac")}, + Mode: to.Ptr(armhdinsightcontainers.RangerUsersyncModeAutomatic), + Users: []*string{ + to.Ptr("testuser1@contoso.com"), + to.Ptr("testuser2@contoso.com")}, + }, + }, + }, + ClusterType: to.Ptr("ranger"), + ComputeProfile: &armhdinsightcontainers.ComputeProfile{ + Nodes: []*armhdinsightcontainers.NodeProfile{ + { + Type: to.Ptr("head"), + Count: to.Ptr[int32](2), + VMSize: to.Ptr("Standard_D3_v2"), + }}, + }, + }, + }, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + res, err := poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } + // You could use response here. 
We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.Cluster = armhdinsightcontainers.Cluster{ + // Name: to.Ptr("cluster1"), + // Type: to.Ptr("Microsoft.HDInsight/clusterPools/clusters"), + // ID: to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.HDInsight/clusterPools/clusterpool1/clusters/cluster1"), + // SystemData: &armhdinsightcontainers.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-02-03T01:01:01.107Z"); return t}()), + // CreatedBy: to.Ptr("string"), + // CreatedByType: to.Ptr(armhdinsightcontainers.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-02-04T02:03:01.197Z"); return t}()), + // LastModifiedBy: to.Ptr("string"), + // LastModifiedByType: to.Ptr(armhdinsightcontainers.CreatedByTypeUser), + // }, + // Location: to.Ptr("West US 2"), + // Properties: &armhdinsightcontainers.ClusterResourceProperties{ + // ClusterProfile: &armhdinsightcontainers.ClusterProfile{ + // AuthorizationProfile: &armhdinsightcontainers.AuthorizationProfile{ + // UserIDs: []*string{ + // to.Ptr("testuser1"), + // to.Ptr("testuser2")}, + // }, + // ClusterVersion: to.Ptr("0.0.1"), + // Components: []*armhdinsightcontainers.ClusterComponentsItem{ + // { + // Name: to.Ptr("HDFS"), + // Version: to.Ptr("2.2.3"), + // }}, + // ConnectivityProfile: &armhdinsightcontainers.ConnectivityProfile{ + // Web: &armhdinsightcontainers.ConnectivityProfileWeb{ + // Fqdn: to.Ptr("cluster1.clusterpool1.westus2.projecthilo.net"), + // }, + // }, + // IdentityProfile: &armhdinsightcontainers.IdentityProfile{ + // MsiClientID: to.Ptr("de91f1d8-767f-460a-ac11-3cf103f74b34"), + // MsiObjectID: 
to.Ptr("40491351-c240-4042-91e0-f644a1d2b441"), + // MsiResourceID: to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi"), + // }, + // OssVersion: to.Ptr("2.2.3"), + // RangerProfile: &armhdinsightcontainers.RangerProfile{ + // RangerAdmin: &armhdinsightcontainers.RangerAdminSpec{ + // Admins: []*string{ + // to.Ptr("testuser1@contoso.com"), + // to.Ptr("testuser2@contoso.com")}, + // Database: &armhdinsightcontainers.RangerAdminSpecDatabase{ + // Name: to.Ptr("testdb"), + // Host: to.Ptr("testsqlserver.database.windows.net"), + // PasswordSecretRef: to.Ptr("https://testkv.vault.azure.net/secrets/mysecret/5df6584d9c25418c8d900240aa6c3452"), + // Username: to.Ptr("admin"), + // }, + // }, + // RangerAudit: &armhdinsightcontainers.RangerAuditSpec{ + // StorageAccount: to.Ptr("https://teststorage.blob.core.windows.net/testblob"), + // }, + // RangerUsersync: &armhdinsightcontainers.RangerUsersyncSpec{ + // Enabled: to.Ptr(true), + // Groups: []*string{ + // to.Ptr("0a53828f-36c9-44c3-be3d-99a7fce977ad"), + // to.Ptr("13be6971-79db-4f33-9d41-b25589ca25ac")}, + // Mode: to.Ptr(armhdinsightcontainers.RangerUsersyncModeAutomatic), + // Users: []*string{ + // to.Ptr("testuser1@contoso.com"), + // to.Ptr("testuser2@contoso.com")}, + // }, + // }, + // }, + // ClusterType: to.Ptr("ranger"), + // ComputeProfile: &armhdinsightcontainers.ComputeProfile{ + // Nodes: []*armhdinsightcontainers.NodeProfile{ + // { + // Type: to.Ptr("head"), + // Count: to.Ptr[int32](2), + // VMSize: to.Ptr("Standard_D3_v2"), + // }}, + // }, + // DeploymentID: to.Ptr("45cd32aead6e4a91b079a0cdbfac8c36"), + // ProvisioningState: to.Ptr(armhdinsightcontainers.ProvisioningStatusSucceeded), + // }, + // } +} + +// Generated from example definition: 
https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-11-01-preview/examples/CreateSparkCluster.json +func ExampleClustersClient_BeginCreate_hdInsightSparkClusterPut() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armhdinsightcontainers.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewClustersClient().BeginCreate(ctx, "hiloResourcegroup", "clusterpool1", "cluster1", armhdinsightcontainers.Cluster{ + Location: to.Ptr("West US 2"), + Properties: &armhdinsightcontainers.ClusterResourceProperties{ + ClusterProfile: &armhdinsightcontainers.ClusterProfile{ + AuthorizationProfile: &armhdinsightcontainers.AuthorizationProfile{ + UserIDs: []*string{ + to.Ptr("testuser1"), + to.Ptr("testuser2")}, + }, + ClusterVersion: to.Ptr("0.0.1"), + IdentityProfile: &armhdinsightcontainers.IdentityProfile{ + MsiClientID: to.Ptr("de91f1d8-767f-460a-ac11-3cf103f74b34"), + MsiObjectID: to.Ptr("40491351-c240-4042-91e0-f644a1d2b441"), + MsiResourceID: to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi"), + }, + OssVersion: to.Ptr("2.2.3"), + ServiceConfigsProfiles: []*armhdinsightcontainers.ClusterServiceConfigsProfile{ + { + Configs: []*armhdinsightcontainers.ClusterServiceConfig{ + { + Component: to.Ptr("spark-config"), + Files: []*armhdinsightcontainers.ClusterConfigFile{ + { + FileName: to.Ptr("spark-defaults.conf"), + Values: map[string]*string{ + "spark.eventLog.enabled": to.Ptr("true"), + }, + }}, + }}, + ServiceName: to.Ptr("spark-service"), + }, + { + Configs: []*armhdinsightcontainers.ClusterServiceConfig{ + { + Component: 
to.Ptr("yarn-config"), + Files: []*armhdinsightcontainers.ClusterConfigFile{ + { + FileName: to.Ptr("core-site.xml"), + Values: map[string]*string{ + "fs.defaultFS": to.Ptr("wasb://testcontainer@teststorage.dfs.core.windows.net/"), + "storage.container": to.Ptr("testcontainer"), + "storage.key": to.Ptr("test key"), + "storage.name": to.Ptr("teststorage"), + "storage.protocol": to.Ptr("wasb"), + }, + }, + { + FileName: to.Ptr("yarn-site.xml"), + Values: map[string]*string{ + "yarn.webapp.ui2.enable": to.Ptr("false"), + }, + }}, + }}, + ServiceName: to.Ptr("yarn-service"), + }}, + SparkProfile: &armhdinsightcontainers.SparkProfile{}, SSHProfile: &armhdinsightcontainers.SSHProfile{ Count: to.Ptr[int32](2), }, }, - ClusterType: to.Ptr("kafka"), + ClusterType: to.Ptr("spark"), ComputeProfile: &armhdinsightcontainers.ComputeProfile{ Nodes: []*armhdinsightcontainers.NodeProfile{ { @@ -449,103 +1123,93 @@ func ExampleClustersClient_BeginCreate_hdInsightClusterPut() { // to.Ptr("testuser1"), // to.Ptr("testuser2")}, // }, - // AutoscaleProfile: &armhdinsightcontainers.AutoscaleProfile{ - // AutoscaleType: to.Ptr(armhdinsightcontainers.AutoscaleTypeScheduleBased), - // Enabled: to.Ptr(true), - // GracefulDecommissionTimeout: to.Ptr[int32](3600), - // LoadBasedConfig: &armhdinsightcontainers.LoadBasedConfig{ - // CooldownPeriod: to.Ptr[int32](300), - // MaxNodes: to.Ptr[int32](20), - // MinNodes: to.Ptr[int32](10), - // PollInterval: to.Ptr[int32](60), - // ScalingRules: []*armhdinsightcontainers.ScalingRule{ - // { - // ActionType: to.Ptr(armhdinsightcontainers.ScaleActionTypeScaleup), - // ComparisonRule: &armhdinsightcontainers.ComparisonRule{ - // Operator: to.Ptr(armhdinsightcontainers.ComparisonOperatorGreaterThan), - // Threshold: to.Ptr[float32](90), - // }, - // EvaluationCount: to.Ptr[int32](3), - // ScalingMetric: to.Ptr("cpu"), - // }, - // { - // ActionType: to.Ptr(armhdinsightcontainers.ScaleActionTypeScaledown), - // ComparisonRule: 
&armhdinsightcontainers.ComparisonRule{ - // Operator: to.Ptr(armhdinsightcontainers.ComparisonOperatorLessThan), - // Threshold: to.Ptr[float32](20), - // }, - // EvaluationCount: to.Ptr[int32](3), - // ScalingMetric: to.Ptr("cpu"), - // }}, + // ClusterVersion: to.Ptr("0.0.1"), + // Components: []*armhdinsightcontainers.ClusterComponentsItem{ + // { + // Name: to.Ptr("HDFS"), + // Version: to.Ptr("2.2.3"), + // }}, + // ConnectivityProfile: &armhdinsightcontainers.ConnectivityProfile{ + // SSH: []*armhdinsightcontainers.SSHConnectivityEndpoint{ + // { + // Endpoint: to.Ptr("cluster1.clusterpool1.westus2.projecthilo.net/ssh/host/sshnode-0"), + // }, + // { + // Endpoint: to.Ptr("cluster1.clusterpool1.westus2.projecthilo.net/ssh/host/sshnode-1"), + // }}, + // Web: &armhdinsightcontainers.ConnectivityProfileWeb{ + // Fqdn: to.Ptr("cluster1.clusterpool1.westus2.projecthilo.net"), // }, - // ScheduleBasedConfig: &armhdinsightcontainers.ScheduleBasedConfig{ - // DefaultCount: to.Ptr[int32](10), - // Schedules: []*armhdinsightcontainers.Schedule{ + // }, + // IdentityProfile: &armhdinsightcontainers.IdentityProfile{ + // MsiClientID: to.Ptr("de91f1d8-767f-460a-ac11-3cf103f74b34"), + // MsiObjectID: to.Ptr("40491351-c240-4042-91e0-f644a1d2b441"), + // MsiResourceID: to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi"), + // }, + // OssVersion: to.Ptr("2.2.3"), + // ServiceConfigsProfiles: []*armhdinsightcontainers.ClusterServiceConfigsProfile{ + // { + // Configs: []*armhdinsightcontainers.ClusterServiceConfig{ // { - // Count: to.Ptr[int32](20), - // Days: []*armhdinsightcontainers.ScheduleDay{ - // to.Ptr(armhdinsightcontainers.ScheduleDayMonday)}, - // EndTime: to.Ptr("12:00"), - // StartTime: to.Ptr("00:00"), - // }, - // { - // Count: to.Ptr[int32](25), - // Days: []*armhdinsightcontainers.ScheduleDay{ - // to.Ptr(armhdinsightcontainers.ScheduleDaySunday)}, - // EndTime: to.Ptr("12:00"), 
- // StartTime: to.Ptr("00:00"), + // Component: to.Ptr("spark-config"), + // Files: []*armhdinsightcontainers.ClusterConfigFile{ + // { + // FileName: to.Ptr("spark-defaults.conf"), + // Values: map[string]*string{ + // "spark.eventLog.enabled": to.Ptr("true"), + // }, // }}, - // TimeZone: to.Ptr("Cen. Australia Standard Time"), - // }, - // }, - // ClusterVersion: to.Ptr("1.0.1"), - // Components: []*armhdinsightcontainers.ClusterComponentsItem{ - // { - // Name: to.Ptr("Hive"), - // Version: to.Ptr("2.4.1"), // }}, - // ConnectivityProfile: &armhdinsightcontainers.ConnectivityProfile{ - // SSH: []*armhdinsightcontainers.SSHConnectivityEndpoint{ - // { - // Endpoint: to.Ptr("cluster1.clusterpool1.westus2.projecthilo.net/ssh/host/sshnode-0"), - // }, - // { - // Endpoint: to.Ptr("cluster1.clusterpool1.westus2.projecthilo.net/ssh/host/sshnode-1"), - // }}, - // Web: &armhdinsightcontainers.ConnectivityProfileWeb{ - // Fqdn: to.Ptr("cluster1.clusterpool1.westus2.projecthilo.net"), - // }, - // }, - // IdentityProfile: &armhdinsightcontainers.IdentityProfile{ - // MsiClientID: to.Ptr("de91f1d8-767f-460a-ac11-3cf103f74b34"), - // MsiObjectID: to.Ptr("40491351-c240-4042-91e0-f644a1d2b441"), - // MsiResourceID: to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi"), - // }, - // KafkaProfile: map[string]any{ - // }, - // OssVersion: to.Ptr("2.4.1"), - // SSHProfile: &armhdinsightcontainers.SSHProfile{ - // Count: to.Ptr[int32](2), - // PodPrefix: to.Ptr("sshnode"), - // }, + // ServiceName: to.Ptr("spark-service"), // }, - // ClusterType: to.Ptr("kafka"), - // ComputeProfile: &armhdinsightcontainers.ComputeProfile{ - // Nodes: []*armhdinsightcontainers.NodeProfile{ + // { + // Configs: []*armhdinsightcontainers.ClusterServiceConfig{ // { - // Type: to.Ptr("worker"), - // Count: to.Ptr[int32](4), - // VMSize: to.Ptr("Standard_D3_v2"), + // Component: to.Ptr("yarn-config"), + // Files: 
[]*armhdinsightcontainers.ClusterConfigFile{ + // { + // FileName: to.Ptr("core-site.xml"), + // Values: map[string]*string{ + // "fs.defaultFS": to.Ptr("wasb://testcontainer@teststorage.dfs.core.windows.net/"), + // "storage.container": to.Ptr("testcontainer"), + // "storage.key": to.Ptr("test key"), + // "storage.name": to.Ptr("teststorage"), + // "storage.protocol": to.Ptr("wasb"), + // }, + // }, + // { + // FileName: to.Ptr("yarn-site.xml"), + // Values: map[string]*string{ + // "yarn.webapp.ui2.enable": to.Ptr("false"), + // }, + // }}, // }}, - // }, - // DeploymentID: to.Ptr("45cd32aead6e4a91b079a0cdbfac8c36"), - // ProvisioningState: to.Ptr(armhdinsightcontainers.ProvisioningStatusSucceeded), + // ServiceName: to.Ptr("yarn-service"), + // }}, + // SparkProfile: &armhdinsightcontainers.SparkProfile{ // }, - // } + // SSHProfile: &armhdinsightcontainers.SSHProfile{ + // Count: to.Ptr[int32](2), + // PodPrefix: to.Ptr("sshnode"), + // }, + // }, + // ClusterType: to.Ptr("spark"), + // ComputeProfile: &armhdinsightcontainers.ComputeProfile{ + // Nodes: []*armhdinsightcontainers.NodeProfile{ + // { + // Type: to.Ptr("worker"), + // Count: to.Ptr[int32](4), + // VMSize: to.Ptr("Standard_D3_v2"), + // }}, + // }, + // DeploymentID: to.Ptr("45cd32aead6e4a91b079a0cdbfac8c36"), + // ProvisioningState: to.Ptr(armhdinsightcontainers.ProvisioningStatusSucceeded), + // }, + // } } -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/7f70e351393addbc31d790a908c994c7c8644d9c/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-06-01-preview/examples/CreateSparkCluster.json -func ExampleClustersClient_BeginCreate_hdInsightSparkClusterPut() { +// Generated from example definition: 
https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-11-01-preview/examples/CreateSparkClusterWithInternalIngress.json +func ExampleClustersClient_BeginCreate_hdInsightSparkClusterPutWithInternalIngress() { cred, err := azidentity.NewDefaultAzureCredential(nil) if err != nil { log.Fatalf("failed to obtain a credential: %v", err) @@ -564,6 +1228,9 @@ func ExampleClustersClient_BeginCreate_hdInsightSparkClusterPut() { to.Ptr("testuser1"), to.Ptr("testuser2")}, }, + ClusterAccessProfile: &armhdinsightcontainers.ClusterAccessProfile{ + EnableInternalIngress: to.Ptr(true), + }, ClusterVersion: to.Ptr("0.0.1"), IdentityProfile: &armhdinsightcontainers.IdentityProfile{ MsiClientID: to.Ptr("de91f1d8-767f-460a-ac11-3cf103f74b34"), @@ -656,6 +1323,10 @@ func ExampleClustersClient_BeginCreate_hdInsightSparkClusterPut() { // to.Ptr("testuser1"), // to.Ptr("testuser2")}, // }, + // ClusterAccessProfile: &armhdinsightcontainers.ClusterAccessProfile{ + // EnableInternalIngress: to.Ptr(true), + // PrivateLinkServiceID: to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.Network/privateLinkServices/testpls"), + // }, // ClusterVersion: to.Ptr("0.0.1"), // Components: []*armhdinsightcontainers.ClusterComponentsItem{ // { @@ -666,12 +1337,15 @@ func ExampleClustersClient_BeginCreate_hdInsightSparkClusterPut() { // SSH: []*armhdinsightcontainers.SSHConnectivityEndpoint{ // { // Endpoint: to.Ptr("cluster1.clusterpool1.westus2.projecthilo.net/ssh/host/sshnode-0"), + // PrivateSSHEndpoint: to.Ptr("cluster1-int.clusterpool1.westus2.projecthilo.net/ssh/host/sshnode-0"), // }, // { // Endpoint: to.Ptr("cluster1.clusterpool1.westus2.projecthilo.net/ssh/host/sshnode-1"), + // PrivateSSHEndpoint: to.Ptr("cluster1-int.clusterpool1.westus2.projecthilo.net/ssh/host/sshnode-1"), // }}, // Web: 
&armhdinsightcontainers.ConnectivityProfileWeb{ // Fqdn: to.Ptr("cluster1.clusterpool1.westus2.projecthilo.net"), + // PrivateFqdn: to.Ptr("cluster1-int.clusterpool1.westus2.projecthilo.net"), // }, // }, // IdentityProfile: &armhdinsightcontainers.IdentityProfile{ @@ -741,8 +1415,8 @@ func ExampleClustersClient_BeginCreate_hdInsightSparkClusterPut() { // } } -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/7f70e351393addbc31d790a908c994c7c8644d9c/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-06-01-preview/examples/PatchCluster.json -func ExampleClustersClient_BeginUpdate() { +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-11-01-preview/examples/PatchCluster.json +func ExampleClustersClient_BeginUpdate_hdInsightClustersPatchTags() { cred, err := azidentity.NewDefaultAzureCredential(nil) if err != nil { log.Fatalf("failed to obtain a credential: %v", err) @@ -753,7 +1427,6 @@ func ExampleClustersClient_BeginUpdate() { log.Fatalf("failed to create client: %v", err) } poller, err := clientFactory.NewClustersClient().BeginUpdate(ctx, "hiloResourcegroup", "clusterpool1", "cluster1", armhdinsightcontainers.ClusterPatch{ - Location: to.Ptr("West US 2"), Properties: &armhdinsightcontainers.ClusterPatchProperties{ ClusterProfile: &armhdinsightcontainers.UpdatableClusterProfile{ AuthorizationProfile: &armhdinsightcontainers.AuthorizationProfile{ @@ -910,7 +1583,7 @@ func ExampleClustersClient_BeginUpdate() { // TimeZone: to.Ptr("Cen. 
Australia Standard Time"), // }, // }, - // ClusterVersion: to.Ptr("1.0.1"), + // ClusterVersion: to.Ptr("1.0.6"), // ConnectivityProfile: &armhdinsightcontainers.ConnectivityProfile{ // SSH: []*armhdinsightcontainers.SSHConnectivityEndpoint{ // { @@ -928,8 +1601,6 @@ func ExampleClustersClient_BeginUpdate() { // MsiObjectID: to.Ptr("40491351-c240-4042-91e0-f644a1d2b441"), // MsiResourceID: to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi"), // }, - // KafkaProfile: map[string]any{ - // }, // LogAnalyticsProfile: &armhdinsightcontainers.ClusterLogAnalyticsProfile{ // ApplicationLogs: &armhdinsightcontainers.ClusterLogAnalyticsApplicationLogs{ // StdErrorEnabled: to.Ptr(true), @@ -938,7 +1609,7 @@ func ExampleClustersClient_BeginUpdate() { // Enabled: to.Ptr(true), // MetricsEnabled: to.Ptr(true), // }, - // OssVersion: to.Ptr("2.4.1"), + // OssVersion: to.Ptr("0.410.0"), // ServiceConfigsProfiles: []*armhdinsightcontainers.ClusterServiceConfigsProfile{ // { // Configs: []*armhdinsightcontainers.ClusterServiceConfig{ @@ -996,14 +1667,21 @@ func ExampleClustersClient_BeginUpdate() { // Count: to.Ptr[int32](2), // PodPrefix: to.Ptr("sshnode"), // }, + // TrinoProfile: &armhdinsightcontainers.TrinoProfile{ + // }, // }, - // ClusterType: to.Ptr("kafka"), + // ClusterType: to.Ptr("Trino"), // ComputeProfile: &armhdinsightcontainers.ComputeProfile{ // Nodes: []*armhdinsightcontainers.NodeProfile{ // { - // Type: to.Ptr("worker"), - // Count: to.Ptr[int32](4), - // VMSize: to.Ptr("Standard_D3_v2"), + // Type: to.Ptr("Head"), + // Count: to.Ptr[int32](2), + // VMSize: to.Ptr("Standard_E8as_v5"), + // }, + // { + // Type: to.Ptr("Worker"), + // Count: to.Ptr[int32](3), + // VMSize: to.Ptr("Standard_E8as_v5"), // }}, // }, // DeploymentID: to.Ptr("45cd32aead6e4a91b079a0cdbfac8c36"), @@ -1012,7 +1690,146 @@ func ExampleClustersClient_BeginUpdate() { // } } -// Generated from example definition: 
https://github.com/Azure/azure-rest-api-specs/blob/7f70e351393addbc31d790a908c994c7c8644d9c/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-06-01-preview/examples/DeleteCluster.json +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-11-01-preview/examples/PatchRangerCluster.json +func ExampleClustersClient_BeginUpdate_hdInsightRangerClusterPatchTags() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armhdinsightcontainers.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewClustersClient().BeginUpdate(ctx, "hiloResourcegroup", "clusterpool1", "cluster1", armhdinsightcontainers.ClusterPatch{ + Properties: &armhdinsightcontainers.ClusterPatchProperties{ + ClusterProfile: &armhdinsightcontainers.UpdatableClusterProfile{ + RangerProfile: &armhdinsightcontainers.RangerProfile{ + RangerAdmin: &armhdinsightcontainers.RangerAdminSpec{ + Admins: []*string{ + to.Ptr("testuser1@contoso.com"), + to.Ptr("testuser2@contoso.com")}, + Database: &armhdinsightcontainers.RangerAdminSpecDatabase{ + Name: to.Ptr("testdb"), + Host: to.Ptr("testsqlserver.database.windows.net"), + PasswordSecretRef: to.Ptr("https://testkv.vault.azure.net/secrets/mysecret/5df6584d9c25418c8d900240aa6c3452"), + Username: to.Ptr("admin"), + }, + }, + RangerAudit: &armhdinsightcontainers.RangerAuditSpec{ + StorageAccount: to.Ptr("https://teststorage.blob.core.windows.net/testblob"), + }, + RangerUsersync: &armhdinsightcontainers.RangerUsersyncSpec{ + Enabled: to.Ptr(true), + Groups: []*string{ + to.Ptr("0a53828f-36c9-44c3-be3d-99a7fce977ad"), + 
to.Ptr("13be6971-79db-4f33-9d41-b25589ca25ac")}, + Mode: to.Ptr(armhdinsightcontainers.RangerUsersyncModeAutomatic), + Users: []*string{ + to.Ptr("testuser1@contoso.com"), + to.Ptr("testuser2@contoso.com")}, + }, + }, + }, + }, + }, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + res, err := poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.Cluster = armhdinsightcontainers.Cluster{ + // Name: to.Ptr("cluster1"), + // Type: to.Ptr("Microsoft.HDInsight/clusterPools/clusters"), + // ID: to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.HDInsight/clusterPools/clusterpool1/clusters/cluster1"), + // SystemData: &armhdinsightcontainers.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-02-03T01:01:01.107Z"); return t}()), + // CreatedBy: to.Ptr("string"), + // CreatedByType: to.Ptr(armhdinsightcontainers.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2020-02-04T02:03:01.197Z"); return t}()), + // LastModifiedBy: to.Ptr("string"), + // LastModifiedByType: to.Ptr(armhdinsightcontainers.CreatedByTypeUser), + // }, + // Location: to.Ptr("West US 2"), + // Tags: map[string]*string{ + // "tag1": to.Ptr("value1"), + // "tag2": to.Ptr("value2"), + // }, + // Properties: &armhdinsightcontainers.ClusterResourceProperties{ + // ClusterProfile: &armhdinsightcontainers.ClusterProfile{ + // AuthorizationProfile: &armhdinsightcontainers.AuthorizationProfile{ + // UserIDs: []*string{ + // to.Ptr("testuser1"), + // to.Ptr("testuser2")}, + // }, + // 
ClusterVersion: to.Ptr("1.0.1"), + // ConnectivityProfile: &armhdinsightcontainers.ConnectivityProfile{ + // Web: &armhdinsightcontainers.ConnectivityProfileWeb{ + // Fqdn: to.Ptr("cluster1.clusterpool1.westus2.projecthilo.net"), + // }, + // }, + // IdentityProfile: &armhdinsightcontainers.IdentityProfile{ + // MsiClientID: to.Ptr("de91f1d8-767f-460a-ac11-3cf103f74b34"), + // MsiObjectID: to.Ptr("40491351-c240-4042-91e0-f644a1d2b441"), + // MsiResourceID: to.Ptr("/subscriptions/subid/resourceGroups/hiloResourcegroup/providers/Microsoft.ManagedIdentity/userAssignedIdentities/test-msi"), + // }, + // LogAnalyticsProfile: &armhdinsightcontainers.ClusterLogAnalyticsProfile{ + // ApplicationLogs: &armhdinsightcontainers.ClusterLogAnalyticsApplicationLogs{ + // StdErrorEnabled: to.Ptr(true), + // StdOutEnabled: to.Ptr(true), + // }, + // Enabled: to.Ptr(true), + // MetricsEnabled: to.Ptr(true), + // }, + // OssVersion: to.Ptr("2.4.1"), + // RangerProfile: &armhdinsightcontainers.RangerProfile{ + // RangerAdmin: &armhdinsightcontainers.RangerAdminSpec{ + // Admins: []*string{ + // to.Ptr("testuser1@contoso.com"), + // to.Ptr("testuser2@contoso.com")}, + // Database: &armhdinsightcontainers.RangerAdminSpecDatabase{ + // Name: to.Ptr("testdb"), + // Host: to.Ptr("testsqlserver.database.windows.net"), + // PasswordSecretRef: to.Ptr("https://testkv.vault.azure.net/secrets/mysecret/5df6584d9c25418c8d900240aa6c3452"), + // Username: to.Ptr("admin"), + // }, + // }, + // RangerAudit: &armhdinsightcontainers.RangerAuditSpec{ + // StorageAccount: to.Ptr("https://teststorage.blob.core.windows.net/testblob"), + // }, + // RangerUsersync: &armhdinsightcontainers.RangerUsersyncSpec{ + // Enabled: to.Ptr(true), + // Groups: []*string{ + // to.Ptr("0a53828f-36c9-44c3-be3d-99a7fce977ad"), + // to.Ptr("13be6971-79db-4f33-9d41-b25589ca25ac")}, + // Mode: to.Ptr(armhdinsightcontainers.RangerUsersyncModeAutomatic), + // Users: []*string{ + // to.Ptr("testuser1@contoso.com"), + // 
to.Ptr("testuser2@contoso.com")}, + // }, + // }, + // }, + // ClusterType: to.Ptr("ranger"), + // ComputeProfile: &armhdinsightcontainers.ComputeProfile{ + // Nodes: []*armhdinsightcontainers.NodeProfile{ + // { + // Type: to.Ptr("head"), + // Count: to.Ptr[int32](2), + // VMSize: to.Ptr("Standard_D3_v2"), + // }}, + // }, + // DeploymentID: to.Ptr("45cd32aead6e4a91b079a0cdbfac8c36"), + // ProvisioningState: to.Ptr(armhdinsightcontainers.ProvisioningStatusSucceeded), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-11-01-preview/examples/DeleteCluster.json func ExampleClustersClient_BeginDelete() { cred, err := azidentity.NewDefaultAzureCredential(nil) if err != nil { @@ -1033,7 +1850,7 @@ func ExampleClustersClient_BeginDelete() { } } -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/7f70e351393addbc31d790a908c994c7c8644d9c/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-06-01-preview/examples/ListClusterServiceConfigs.json +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-11-01-preview/examples/ListClusterServiceConfigs.json func ExampleClustersClient_NewListServiceConfigsPager() { cred, err := azidentity.NewDefaultAzureCredential(nil) if err != nil { @@ -1069,7 +1886,7 @@ func ExampleClustersClient_NewListServiceConfigsPager() { } } -// Generated from example definition: 
https://github.com/Azure/azure-rest-api-specs/blob/7f70e351393addbc31d790a908c994c7c8644d9c/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-06-01-preview/examples/ListClusterInstanceViews.json +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-11-01-preview/examples/ListClusterInstanceViews.json func ExampleClustersClient_NewListInstanceViewsPager() { cred, err := azidentity.NewDefaultAzureCredential(nil) if err != nil { @@ -1113,7 +1930,7 @@ func ExampleClustersClient_NewListInstanceViewsPager() { } } -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/7f70e351393addbc31d790a908c994c7c8644d9c/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-06-01-preview/examples/GetClusterInstanceView.json +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-11-01-preview/examples/GetClusterInstanceView.json func ExampleClustersClient_GetInstanceView() { cred, err := azidentity.NewDefaultAzureCredential(nil) if err != nil { diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/constants.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/constants.go index f07f8720f3dc..6a26714ca441 100644 --- a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/constants.go +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/constants.go @@ -10,7 +10,7 @@ package armhdinsightcontainers const ( moduleName = "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers" - moduleVersion = "v0.2.0" + 
moduleVersion = "v0.3.0" ) // Action - A string property that indicates the action to be performed on the Flink job. It can have one of the following @@ -21,8 +21,10 @@ type Action string const ( ActionCANCEL Action = "CANCEL" ActionDELETE Action = "DELETE" + ActionLASTSTATEUPDATE Action = "LAST_STATE_UPDATE" ActionLISTSAVEPOINT Action = "LIST_SAVEPOINT" ActionNEW Action = "NEW" + ActionRELAUNCH Action = "RE_LAUNCH" ActionSAVEPOINT Action = "SAVEPOINT" ActionSTART Action = "START" ActionSTATELESSUPDATE Action = "STATELESS_UPDATE" @@ -35,8 +37,10 @@ func PossibleActionValues() []Action { return []Action{ ActionCANCEL, ActionDELETE, + ActionLASTSTATEUPDATE, ActionLISTSAVEPOINT, ActionNEW, + ActionRELAUNCH, ActionSAVEPOINT, ActionSTART, ActionSTATELESSUPDATE, @@ -75,6 +79,70 @@ func PossibleAutoscaleTypeValues() []AutoscaleType { } } +// ClusterAvailableUpgradeType - Type of upgrade. +type ClusterAvailableUpgradeType string + +const ( + ClusterAvailableUpgradeTypeAKSPatchUpgrade ClusterAvailableUpgradeType = "AKSPatchUpgrade" + ClusterAvailableUpgradeTypeHotfixUpgrade ClusterAvailableUpgradeType = "HotfixUpgrade" +) + +// PossibleClusterAvailableUpgradeTypeValues returns the possible values for the ClusterAvailableUpgradeType const type. +func PossibleClusterAvailableUpgradeTypeValues() []ClusterAvailableUpgradeType { + return []ClusterAvailableUpgradeType{ + ClusterAvailableUpgradeTypeAKSPatchUpgrade, + ClusterAvailableUpgradeTypeHotfixUpgrade, + } +} + +// ClusterPoolAvailableUpgradeType - Type of upgrade. +type ClusterPoolAvailableUpgradeType string + +const ( + ClusterPoolAvailableUpgradeTypeAKSPatchUpgrade ClusterPoolAvailableUpgradeType = "AKSPatchUpgrade" + ClusterPoolAvailableUpgradeTypeNodeOsUpgrade ClusterPoolAvailableUpgradeType = "NodeOsUpgrade" +) + +// PossibleClusterPoolAvailableUpgradeTypeValues returns the possible values for the ClusterPoolAvailableUpgradeType const type. 
+func PossibleClusterPoolAvailableUpgradeTypeValues() []ClusterPoolAvailableUpgradeType { + return []ClusterPoolAvailableUpgradeType{ + ClusterPoolAvailableUpgradeTypeAKSPatchUpgrade, + ClusterPoolAvailableUpgradeTypeNodeOsUpgrade, + } +} + +// ClusterPoolUpgradeType - Type of upgrade. +type ClusterPoolUpgradeType string + +const ( + ClusterPoolUpgradeTypeAKSPatchUpgrade ClusterPoolUpgradeType = "AKSPatchUpgrade" + ClusterPoolUpgradeTypeNodeOsUpgrade ClusterPoolUpgradeType = "NodeOsUpgrade" +) + +// PossibleClusterPoolUpgradeTypeValues returns the possible values for the ClusterPoolUpgradeType const type. +func PossibleClusterPoolUpgradeTypeValues() []ClusterPoolUpgradeType { + return []ClusterPoolUpgradeType{ + ClusterPoolUpgradeTypeAKSPatchUpgrade, + ClusterPoolUpgradeTypeNodeOsUpgrade, + } +} + +// ClusterUpgradeType - Type of upgrade. +type ClusterUpgradeType string + +const ( + ClusterUpgradeTypeAKSPatchUpgrade ClusterUpgradeType = "AKSPatchUpgrade" + ClusterUpgradeTypeHotfixUpgrade ClusterUpgradeType = "HotfixUpgrade" +) + +// PossibleClusterUpgradeTypeValues returns the possible values for the ClusterUpgradeType const type. +func PossibleClusterUpgradeTypeValues() []ClusterUpgradeType { + return []ClusterUpgradeType{ + ClusterUpgradeTypeAKSPatchUpgrade, + ClusterUpgradeTypeHotfixUpgrade, + } +} + // ComparisonOperator - The comparison operator. type ComparisonOperator string @@ -132,6 +200,99 @@ func PossibleCreatedByTypeValues() []CreatedByType { } } +// CurrentClusterAksVersionStatus - Current AKS version's status: whether it is deprecated or supported +type CurrentClusterAksVersionStatus string + +const ( + CurrentClusterAksVersionStatusDeprecated CurrentClusterAksVersionStatus = "Deprecated" + CurrentClusterAksVersionStatusSupported CurrentClusterAksVersionStatus = "Supported" +) + +// PossibleCurrentClusterAksVersionStatusValues returns the possible values for the CurrentClusterAksVersionStatus const type. 
+func PossibleCurrentClusterAksVersionStatusValues() []CurrentClusterAksVersionStatus { + return []CurrentClusterAksVersionStatus{ + CurrentClusterAksVersionStatusDeprecated, + CurrentClusterAksVersionStatusSupported, + } +} + +// CurrentClusterPoolAksVersionStatus - Current AKS version's status: whether it is deprecated or supported +type CurrentClusterPoolAksVersionStatus string + +const ( + CurrentClusterPoolAksVersionStatusDeprecated CurrentClusterPoolAksVersionStatus = "Deprecated" + CurrentClusterPoolAksVersionStatusSupported CurrentClusterPoolAksVersionStatus = "Supported" +) + +// PossibleCurrentClusterPoolAksVersionStatusValues returns the possible values for the CurrentClusterPoolAksVersionStatus const type. +func PossibleCurrentClusterPoolAksVersionStatusValues() []CurrentClusterPoolAksVersionStatus { + return []CurrentClusterPoolAksVersionStatus{ + CurrentClusterPoolAksVersionStatusDeprecated, + CurrentClusterPoolAksVersionStatusSupported, + } +} + +// DataDiskType - Managed Disk Type. +type DataDiskType string + +const ( + DataDiskTypePremiumSSDLRS DataDiskType = "Premium_SSD_LRS" + DataDiskTypePremiumSSDV2LRS DataDiskType = "Premium_SSD_v2_LRS" + DataDiskTypePremiumSSDZRS DataDiskType = "Premium_SSD_ZRS" + DataDiskTypeStandardHDDLRS DataDiskType = "Standard_HDD_LRS" + DataDiskTypeStandardSSDLRS DataDiskType = "Standard_SSD_LRS" + DataDiskTypeStandardSSDZRS DataDiskType = "Standard_SSD_ZRS" +) + +// PossibleDataDiskTypeValues returns the possible values for the DataDiskType const type. +func PossibleDataDiskTypeValues() []DataDiskType { + return []DataDiskType{ + DataDiskTypePremiumSSDLRS, + DataDiskTypePremiumSSDV2LRS, + DataDiskTypePremiumSSDZRS, + DataDiskTypeStandardHDDLRS, + DataDiskTypeStandardSSDLRS, + DataDiskTypeStandardSSDZRS, + } +} + +// DbConnectionAuthenticationMode - The authentication mode to connect to your Hive metastore database. 
More details: +// https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization +type DbConnectionAuthenticationMode string + +const ( + // DbConnectionAuthenticationModeIdentityAuth - The managed-identity-based authentication to connect to your Hive metastore + // database. + DbConnectionAuthenticationModeIdentityAuth DbConnectionAuthenticationMode = "IdentityAuth" + // DbConnectionAuthenticationModeSQLAuth - The password-based authentication to connect to your Hive metastore database. + DbConnectionAuthenticationModeSQLAuth DbConnectionAuthenticationMode = "SqlAuth" +) + +// PossibleDbConnectionAuthenticationModeValues returns the possible values for the DbConnectionAuthenticationMode const type. +func PossibleDbConnectionAuthenticationModeValues() []DbConnectionAuthenticationMode { + return []DbConnectionAuthenticationMode{ + DbConnectionAuthenticationModeIdentityAuth, + DbConnectionAuthenticationModeSQLAuth, + } +} + +// DeploymentMode - A string property that indicates the deployment mode of Flink cluster. It can have one of the following +// enum values => Application, Session. Default value is Session +type DeploymentMode string + +const ( + DeploymentModeApplication DeploymentMode = "Application" + DeploymentModeSession DeploymentMode = "Session" +) + +// PossibleDeploymentModeValues returns the possible values for the DeploymentMode const type. +func PossibleDeploymentModeValues() []DeploymentMode { + return []DeploymentMode{ + DeploymentModeApplication, + DeploymentModeSession, + } +} + // JobType - Type of cluster job. type JobType string @@ -164,6 +325,26 @@ func PossibleKeyVaultObjectTypeValues() []KeyVaultObjectType { } } +// MetastoreDbConnectionAuthenticationMode - The authentication mode to connect to your Hive metastore database. 
More details: +// https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization +type MetastoreDbConnectionAuthenticationMode string + +const ( + // MetastoreDbConnectionAuthenticationModeIdentityAuth - The managed-identity-based authentication to connect to your Hive + // metastore database. + MetastoreDbConnectionAuthenticationModeIdentityAuth MetastoreDbConnectionAuthenticationMode = "IdentityAuth" + // MetastoreDbConnectionAuthenticationModeSQLAuth - The password-based authentication to connect to your Hive metastore database. + MetastoreDbConnectionAuthenticationModeSQLAuth MetastoreDbConnectionAuthenticationMode = "SqlAuth" +) + +// PossibleMetastoreDbConnectionAuthenticationModeValues returns the possible values for the MetastoreDbConnectionAuthenticationMode const type. +func PossibleMetastoreDbConnectionAuthenticationModeValues() []MetastoreDbConnectionAuthenticationMode { + return []MetastoreDbConnectionAuthenticationMode{ + MetastoreDbConnectionAuthenticationModeIdentityAuth, + MetastoreDbConnectionAuthenticationModeSQLAuth, + } +} + // Origin - The intended executor of the operation; as in Resource Based Access Control (RBAC) and audit logs UX. Default // value is "user,system" type Origin string @@ -183,6 +364,26 @@ func PossibleOriginValues() []Origin { } } +// OutboundType - This can only be set at cluster pool creation time and cannot be changed later. +type OutboundType string + +const ( + // OutboundTypeLoadBalancer - The load balancer is used for egress through an AKS assigned public IP. This supports Kubernetes + // services of type 'loadBalancer'. + OutboundTypeLoadBalancer OutboundType = "loadBalancer" + // OutboundTypeUserDefinedRouting - Egress paths must be defined by the user. This is an advanced scenario and requires proper + // network configuration. 
+ OutboundTypeUserDefinedRouting OutboundType = "userDefinedRouting" +) + +// PossibleOutboundTypeValues returns the possible values for the OutboundType const type. +func PossibleOutboundTypeValues() []OutboundType { + return []OutboundType{ + OutboundTypeLoadBalancer, + OutboundTypeUserDefinedRouting, + } +} + // ProvisioningStatus - Provisioning state of the resource. type ProvisioningStatus string @@ -203,6 +404,22 @@ func PossibleProvisioningStatusValues() []ProvisioningStatus { } } +// RangerUsersyncMode - User & groups can be synced automatically or via a static list that's refreshed. +type RangerUsersyncMode string + +const ( + RangerUsersyncModeAutomatic RangerUsersyncMode = "automatic" + RangerUsersyncModeStatic RangerUsersyncMode = "static" +) + +// PossibleRangerUsersyncModeValues returns the possible values for the RangerUsersyncMode const type. +func PossibleRangerUsersyncModeValues() []RangerUsersyncMode { + return []RangerUsersyncMode{ + RangerUsersyncModeAutomatic, + RangerUsersyncModeStatic, + } +} + // ScaleActionType - The action type. type ScaleActionType string @@ -243,3 +460,42 @@ func PossibleScheduleDayValues() []ScheduleDay { ScheduleDayWednesday, } } + +// Severity - Severity of this upgrade. +type Severity string + +const ( + SeverityCritical Severity = "critical" + SeverityHigh Severity = "high" + SeverityLow Severity = "low" + SeverityMedium Severity = "medium" +) + +// PossibleSeverityValues returns the possible values for the Severity const type. +func PossibleSeverityValues() []Severity { + return []Severity{ + SeverityCritical, + SeverityHigh, + SeverityLow, + SeverityMedium, + } +} + +// UpgradeMode - A string property that indicates the upgrade mode to be performed on the Flink job. It can have one of the +// following enum values => STATELESSUPDATE, UPDATE, LASTSTATE_UPDATE. 
+type UpgradeMode string + +const ( + UpgradeModeLASTSTATEUPDATE UpgradeMode = "LAST_STATE_UPDATE" + UpgradeModeSTATELESSUPDATE UpgradeMode = "STATELESS_UPDATE" + UpgradeModeUPDATE UpgradeMode = "UPDATE" +) + +// PossibleUpgradeModeValues returns the possible values for the UpgradeMode const type. +func PossibleUpgradeModeValues() []UpgradeMode { + return []UpgradeMode{ + UpgradeModeLASTSTATEUPDATE, + UpgradeModeSTATELESSUPDATE, + UpgradeModeUPDATE, + } +} diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/fake/clusteravailableupgrades_server.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/fake/clusteravailableupgrades_server.go new file mode 100644 index 000000000000..b33e63e86b29 --- /dev/null +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/fake/clusteravailableupgrades_server.go @@ -0,0 +1,116 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers" + "net/http" + "net/url" + "regexp" +) + +// ClusterAvailableUpgradesServer is a fake server for instances of the armhdinsightcontainers.ClusterAvailableUpgradesClient type. 
+type ClusterAvailableUpgradesServer struct { + // NewListPager is the fake for method ClusterAvailableUpgradesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, clusterPoolName string, clusterName string, options *armhdinsightcontainers.ClusterAvailableUpgradesClientListOptions) (resp azfake.PagerResponder[armhdinsightcontainers.ClusterAvailableUpgradesClientListResponse]) +} + +// NewClusterAvailableUpgradesServerTransport creates a new instance of ClusterAvailableUpgradesServerTransport with the provided implementation. +// The returned ClusterAvailableUpgradesServerTransport instance is connected to an instance of armhdinsightcontainers.ClusterAvailableUpgradesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewClusterAvailableUpgradesServerTransport(srv *ClusterAvailableUpgradesServer) *ClusterAvailableUpgradesServerTransport { + return &ClusterAvailableUpgradesServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armhdinsightcontainers.ClusterAvailableUpgradesClientListResponse]](), + } +} + +// ClusterAvailableUpgradesServerTransport connects instances of armhdinsightcontainers.ClusterAvailableUpgradesClient to instances of ClusterAvailableUpgradesServer. +// Don't use this type directly, use NewClusterAvailableUpgradesServerTransport instead. +type ClusterAvailableUpgradesServerTransport struct { + srv *ClusterAvailableUpgradesServer + newListPager *tracker[azfake.PagerResponder[armhdinsightcontainers.ClusterAvailableUpgradesClientListResponse]] +} + +// Do implements the policy.Transporter interface for ClusterAvailableUpgradesServerTransport. 
+func (c *ClusterAvailableUpgradesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ClusterAvailableUpgradesClient.NewListPager": + resp, err = c.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (c *ClusterAvailableUpgradesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if c.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := c.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.HDInsight/clusterpools/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/clusters/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/availableUpgrades` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + clusterPoolNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("clusterPoolName")]) + if err != nil { + return nil, err + } + clusterNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("clusterName")]) + if err != nil { + return nil, err + } + resp := c.srv.NewListPager(resourceGroupNameParam, clusterPoolNameParam, clusterNameParam, nil) + newListPager = &resp + c.newListPager.add(req, newListPager) + 
server.PagerResponderInjectNextLinks(newListPager, req, func(page *armhdinsightcontainers.ClusterAvailableUpgradesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + c.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + c.newListPager.remove(req) + } + return resp, nil +} diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/fake/clusterjobs_server.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/fake/clusterjobs_server.go index d1f9045c3e1b..96837ea4b88d 100644 --- a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/fake/clusterjobs_server.go +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/fake/clusterjobs_server.go @@ -91,6 +91,7 @@ func (c *ClusterJobsServerTransport) dispatchNewListPager(req *http.Request) (*h if matches == nil || len(matches) < 4 { return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) } + qp := req.URL.Query() resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) if err != nil { return nil, err @@ -103,7 +104,18 @@ func (c *ClusterJobsServerTransport) dispatchNewListPager(req *http.Request) (*h if err != nil { return nil, err } - resp := c.srv.NewListPager(resourceGroupNameParam, clusterPoolNameParam, clusterNameParam, nil) + filterUnescaped, err := url.QueryUnescape(qp.Get("$filter")) + if err != nil { + return nil, err + } + filterParam := getOptional(filterUnescaped) + var options *armhdinsightcontainers.ClusterJobsClientListOptions + if filterParam != nil { + options = &armhdinsightcontainers.ClusterJobsClientListOptions{ + Filter: filterParam, + } + } + resp := 
c.srv.NewListPager(resourceGroupNameParam, clusterPoolNameParam, clusterNameParam, options) newListPager = &resp c.newListPager.add(req, newListPager) server.PagerResponderInjectNextLinks(newListPager, req, func(page *armhdinsightcontainers.ClusterJobsClientListResponse, createLink func() string) { diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/fake/clusterpoolavailableupgrades_server.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/fake/clusterpoolavailableupgrades_server.go new file mode 100644 index 000000000000..9ae95f2c6060 --- /dev/null +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/fake/clusterpoolavailableupgrades_server.go @@ -0,0 +1,112 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package fake + +import ( + "errors" + "fmt" + azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers" + "net/http" + "net/url" + "regexp" +) + +// ClusterPoolAvailableUpgradesServer is a fake server for instances of the armhdinsightcontainers.ClusterPoolAvailableUpgradesClient type. 
+type ClusterPoolAvailableUpgradesServer struct { + // NewListPager is the fake for method ClusterPoolAvailableUpgradesClient.NewListPager + // HTTP status codes to indicate success: http.StatusOK + NewListPager func(resourceGroupName string, clusterPoolName string, options *armhdinsightcontainers.ClusterPoolAvailableUpgradesClientListOptions) (resp azfake.PagerResponder[armhdinsightcontainers.ClusterPoolAvailableUpgradesClientListResponse]) +} + +// NewClusterPoolAvailableUpgradesServerTransport creates a new instance of ClusterPoolAvailableUpgradesServerTransport with the provided implementation. +// The returned ClusterPoolAvailableUpgradesServerTransport instance is connected to an instance of armhdinsightcontainers.ClusterPoolAvailableUpgradesClient via the +// azcore.ClientOptions.Transporter field in the client's constructor parameters. +func NewClusterPoolAvailableUpgradesServerTransport(srv *ClusterPoolAvailableUpgradesServer) *ClusterPoolAvailableUpgradesServerTransport { + return &ClusterPoolAvailableUpgradesServerTransport{ + srv: srv, + newListPager: newTracker[azfake.PagerResponder[armhdinsightcontainers.ClusterPoolAvailableUpgradesClientListResponse]](), + } +} + +// ClusterPoolAvailableUpgradesServerTransport connects instances of armhdinsightcontainers.ClusterPoolAvailableUpgradesClient to instances of ClusterPoolAvailableUpgradesServer. +// Don't use this type directly, use NewClusterPoolAvailableUpgradesServerTransport instead. +type ClusterPoolAvailableUpgradesServerTransport struct { + srv *ClusterPoolAvailableUpgradesServer + newListPager *tracker[azfake.PagerResponder[armhdinsightcontainers.ClusterPoolAvailableUpgradesClientListResponse]] +} + +// Do implements the policy.Transporter interface for ClusterPoolAvailableUpgradesServerTransport. 
+func (c *ClusterPoolAvailableUpgradesServerTransport) Do(req *http.Request) (*http.Response, error) { + rawMethod := req.Context().Value(runtime.CtxAPINameKey{}) + method, ok := rawMethod.(string) + if !ok { + return nil, nonRetriableError{errors.New("unable to dispatch request, missing value for CtxAPINameKey")} + } + + var resp *http.Response + var err error + + switch method { + case "ClusterPoolAvailableUpgradesClient.NewListPager": + resp, err = c.dispatchNewListPager(req) + default: + err = fmt.Errorf("unhandled API %s", method) + } + + if err != nil { + return nil, err + } + + return resp, nil +} + +func (c *ClusterPoolAvailableUpgradesServerTransport) dispatchNewListPager(req *http.Request) (*http.Response, error) { + if c.srv.NewListPager == nil { + return nil, &nonRetriableError{errors.New("fake for method NewListPager not implemented")} + } + newListPager := c.newListPager.get(req) + if newListPager == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.HDInsight/clusterpools/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/availableUpgrades` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + clusterPoolNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("clusterPoolName")]) + if err != nil { + return nil, err + } + resp := c.srv.NewListPager(resourceGroupNameParam, clusterPoolNameParam, nil) + newListPager = &resp + c.newListPager.add(req, newListPager) + server.PagerResponderInjectNextLinks(newListPager, req, func(page *armhdinsightcontainers.ClusterPoolAvailableUpgradesClientListResponse, createLink func() string) { + page.NextLink = to.Ptr(createLink()) + }) + } + 
resp, err := server.PagerResponderNext(newListPager, req) + if err != nil { + return nil, err + } + if !contains([]int{http.StatusOK}, resp.StatusCode) { + c.newListPager.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK", resp.StatusCode)} + } + if !server.PagerResponderMore(newListPager) { + c.newListPager.remove(req) + } + return resp, nil +} diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/fake/clusterpools_server.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/fake/clusterpools_server.go index 524306753e45..4f06640c26fc 100644 --- a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/fake/clusterpools_server.go +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/fake/clusterpools_server.go @@ -47,6 +47,10 @@ type ClusterPoolsServer struct { // BeginUpdateTags is the fake for method ClusterPoolsClient.BeginUpdateTags // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted BeginUpdateTags func(ctx context.Context, resourceGroupName string, clusterPoolName string, clusterPoolTags armhdinsightcontainers.TagsObject, options *armhdinsightcontainers.ClusterPoolsClientBeginUpdateTagsOptions) (resp azfake.PollerResponder[armhdinsightcontainers.ClusterPoolsClientUpdateTagsResponse], errResp azfake.ErrorResponder) + + // BeginUpgrade is the fake for method ClusterPoolsClient.BeginUpgrade + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginUpgrade func(ctx context.Context, resourceGroupName string, clusterPoolName string, clusterPoolUpgradeRequest armhdinsightcontainers.ClusterPoolUpgrade, options *armhdinsightcontainers.ClusterPoolsClientBeginUpgradeOptions) (resp azfake.PollerResponder[armhdinsightcontainers.ClusterPoolsClientUpgradeResponse], errResp azfake.ErrorResponder) } // NewClusterPoolsServerTransport creates a new instance of ClusterPoolsServerTransport with the 
provided implementation. @@ -60,6 +64,7 @@ func NewClusterPoolsServerTransport(srv *ClusterPoolsServer) *ClusterPoolsServer newListByResourceGroupPager: newTracker[azfake.PagerResponder[armhdinsightcontainers.ClusterPoolsClientListByResourceGroupResponse]](), newListBySubscriptionPager: newTracker[azfake.PagerResponder[armhdinsightcontainers.ClusterPoolsClientListBySubscriptionResponse]](), beginUpdateTags: newTracker[azfake.PollerResponder[armhdinsightcontainers.ClusterPoolsClientUpdateTagsResponse]](), + beginUpgrade: newTracker[azfake.PollerResponder[armhdinsightcontainers.ClusterPoolsClientUpgradeResponse]](), } } @@ -72,6 +77,7 @@ type ClusterPoolsServerTransport struct { newListByResourceGroupPager *tracker[azfake.PagerResponder[armhdinsightcontainers.ClusterPoolsClientListByResourceGroupResponse]] newListBySubscriptionPager *tracker[azfake.PagerResponder[armhdinsightcontainers.ClusterPoolsClientListBySubscriptionResponse]] beginUpdateTags *tracker[azfake.PollerResponder[armhdinsightcontainers.ClusterPoolsClientUpdateTagsResponse]] + beginUpgrade *tracker[azfake.PollerResponder[armhdinsightcontainers.ClusterPoolsClientUpgradeResponse]] } // Do implements the policy.Transporter interface for ClusterPoolsServerTransport. 
@@ -98,6 +104,8 @@ func (c *ClusterPoolsServerTransport) Do(req *http.Request) (*http.Response, err resp, err = c.dispatchNewListBySubscriptionPager(req) case "ClusterPoolsClient.BeginUpdateTags": resp, err = c.dispatchBeginUpdateTags(req) + case "ClusterPoolsClient.BeginUpgrade": + resp, err = c.dispatchBeginUpgrade(req) default: err = fmt.Errorf("unhandled API %s", method) } @@ -351,3 +359,51 @@ func (c *ClusterPoolsServerTransport) dispatchBeginUpdateTags(req *http.Request) return resp, nil } + +func (c *ClusterPoolsServerTransport) dispatchBeginUpgrade(req *http.Request) (*http.Response, error) { + if c.srv.BeginUpgrade == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpgrade not implemented")} + } + beginUpgrade := c.beginUpgrade.get(req) + if beginUpgrade == nil { + const regexStr = `/subscriptions/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.HDInsight/clusterpools/(?P[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/upgrade` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 3 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armhdinsightcontainers.ClusterPoolUpgrade](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + clusterPoolNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("clusterPoolName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.BeginUpgrade(req.Context(), resourceGroupNameParam, clusterPoolNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpgrade = &respr + c.beginUpgrade.add(req, beginUpgrade) + } + + resp, err := server.PollerResponderNext(beginUpgrade, req) + if err != nil { + return nil, 
err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + c.beginUpgrade.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpgrade) { + c.beginUpgrade.remove(req) + } + + return resp, nil +} diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/fake/clusters_server.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/fake/clusters_server.go index c17aeefe66d0..1a639c3af878 100644 --- a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/fake/clusters_server.go +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/fake/clusters_server.go @@ -59,6 +59,10 @@ type ClustersServer struct { // BeginUpdate is the fake for method ClustersClient.BeginUpdate // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted BeginUpdate func(ctx context.Context, resourceGroupName string, clusterPoolName string, clusterName string, clusterPatchRequest armhdinsightcontainers.ClusterPatch, options *armhdinsightcontainers.ClustersClientBeginUpdateOptions) (resp azfake.PollerResponder[armhdinsightcontainers.ClustersClientUpdateResponse], errResp azfake.ErrorResponder) + + // BeginUpgrade is the fake for method ClustersClient.BeginUpgrade + // HTTP status codes to indicate success: http.StatusOK, http.StatusAccepted + BeginUpgrade func(ctx context.Context, resourceGroupName string, clusterPoolName string, clusterName string, clusterUpgradeRequest armhdinsightcontainers.ClusterUpgrade, options *armhdinsightcontainers.ClustersClientBeginUpgradeOptions) (resp azfake.PollerResponder[armhdinsightcontainers.ClustersClientUpgradeResponse], errResp azfake.ErrorResponder) } // NewClustersServerTransport creates a new instance of ClustersServerTransport with the provided implementation. 
@@ -74,6 +78,7 @@ func NewClustersServerTransport(srv *ClustersServer) *ClustersServerTransport { newListServiceConfigsPager: newTracker[azfake.PagerResponder[armhdinsightcontainers.ClustersClientListServiceConfigsResponse]](), beginResize: newTracker[azfake.PollerResponder[armhdinsightcontainers.ClustersClientResizeResponse]](), beginUpdate: newTracker[azfake.PollerResponder[armhdinsightcontainers.ClustersClientUpdateResponse]](), + beginUpgrade: newTracker[azfake.PollerResponder[armhdinsightcontainers.ClustersClientUpgradeResponse]](), } } @@ -88,6 +93,7 @@ type ClustersServerTransport struct { newListServiceConfigsPager *tracker[azfake.PagerResponder[armhdinsightcontainers.ClustersClientListServiceConfigsResponse]] beginResize *tracker[azfake.PollerResponder[armhdinsightcontainers.ClustersClientResizeResponse]] beginUpdate *tracker[azfake.PollerResponder[armhdinsightcontainers.ClustersClientUpdateResponse]] + beginUpgrade *tracker[azfake.PollerResponder[armhdinsightcontainers.ClustersClientUpgradeResponse]] } // Do implements the policy.Transporter interface for ClustersServerTransport. @@ -120,6 +126,8 @@ func (c *ClustersServerTransport) Do(req *http.Request) (*http.Response, error) resp, err = c.dispatchBeginResize(req) case "ClustersClient.BeginUpdate": resp, err = c.dispatchBeginUpdate(req) + case "ClustersClient.BeginUpgrade": + resp, err = c.dispatchBeginUpgrade(req) default: err = fmt.Errorf("unhandled API %s", method) } @@ -539,3 +547,55 @@ func (c *ClustersServerTransport) dispatchBeginUpdate(req *http.Request) (*http. 
return resp, nil } + +func (c *ClustersServerTransport) dispatchBeginUpgrade(req *http.Request) (*http.Response, error) { + if c.srv.BeginUpgrade == nil { + return nil, &nonRetriableError{errors.New("fake for method BeginUpgrade not implemented")} + } + beginUpgrade := c.beginUpgrade.get(req) + if beginUpgrade == nil { + const regexStr = `/subscriptions/(?P<subscriptionId>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/resourceGroups/(?P<resourceGroupName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/providers/Microsoft\.HDInsight/clusterpools/(?P<clusterPoolName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/clusters/(?P<clusterName>[!#&$-;=?-\[\]_a-zA-Z0-9~%@]+)/upgrade` + regex := regexp.MustCompile(regexStr) + matches := regex.FindStringSubmatch(req.URL.EscapedPath()) + if matches == nil || len(matches) < 4 { + return nil, fmt.Errorf("failed to parse path %s", req.URL.Path) + } + body, err := server.UnmarshalRequestAsJSON[armhdinsightcontainers.ClusterUpgrade](req) + if err != nil { + return nil, err + } + resourceGroupNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("resourceGroupName")]) + if err != nil { + return nil, err + } + clusterPoolNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("clusterPoolName")]) + if err != nil { + return nil, err + } + clusterNameParam, err := url.PathUnescape(matches[regex.SubexpIndex("clusterName")]) + if err != nil { + return nil, err + } + respr, errRespr := c.srv.BeginUpgrade(req.Context(), resourceGroupNameParam, clusterPoolNameParam, clusterNameParam, body, nil) + if respErr := server.GetError(errRespr, req); respErr != nil { + return nil, respErr + } + beginUpgrade = &respr + c.beginUpgrade.add(req, beginUpgrade) + } + + resp, err := server.PollerResponderNext(beginUpgrade, req) + if err != nil { + return nil, err + } + + if !contains([]int{http.StatusOK, http.StatusAccepted}, resp.StatusCode) { + c.beginUpgrade.remove(req) + return nil, &nonRetriableError{fmt.Errorf("unexpected status code %d. 
acceptable values are http.StatusOK, http.StatusAccepted", resp.StatusCode)} + } + if !server.PollerResponderMore(beginUpgrade) { + c.beginUpgrade.remove(req) + } + + return resp, nil +} diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/fake/internal.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/fake/internal.go index 5f75802a569e..94e060e89dbd 100644 --- a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/fake/internal.go +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/fake/internal.go @@ -11,6 +11,7 @@ package fake import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake/server" "net/http" + "reflect" "sync" ) @@ -31,6 +32,13 @@ func contains[T comparable](s []T, v T) bool { return false } +func getOptional[T any](v T) *T { + if reflect.ValueOf(v).IsZero() { + return nil + } + return &v +} + func newTracker[T any]() *tracker[T] { return &tracker[T]{ items: map[string]*T{}, diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/fake/server_factory.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/fake/server_factory.go index af9fe04f2f5f..ac15b00fe2cc 100644 --- a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/fake/server_factory.go +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/fake/server_factory.go @@ -21,7 +21,9 @@ import ( type ServerFactory struct { AvailableClusterPoolVersionsServer AvailableClusterPoolVersionsServer AvailableClusterVersionsServer AvailableClusterVersionsServer + ClusterAvailableUpgradesServer ClusterAvailableUpgradesServer ClusterJobsServer ClusterJobsServer + ClusterPoolAvailableUpgradesServer ClusterPoolAvailableUpgradesServer ClusterPoolsServer ClusterPoolsServer ClustersServer ClustersServer LocationsServer LocationsServer @@ -44,7 +46,9 @@ type ServerFactoryTransport struct { trMu sync.Mutex trAvailableClusterPoolVersionsServer 
*AvailableClusterPoolVersionsServerTransport trAvailableClusterVersionsServer *AvailableClusterVersionsServerTransport + trClusterAvailableUpgradesServer *ClusterAvailableUpgradesServerTransport trClusterJobsServer *ClusterJobsServerTransport + trClusterPoolAvailableUpgradesServer *ClusterPoolAvailableUpgradesServerTransport trClusterPoolsServer *ClusterPoolsServerTransport trClustersServer *ClustersServerTransport trLocationsServer *LocationsServerTransport @@ -74,9 +78,19 @@ func (s *ServerFactoryTransport) Do(req *http.Request) (*http.Response, error) { return NewAvailableClusterVersionsServerTransport(&s.srv.AvailableClusterVersionsServer) }) resp, err = s.trAvailableClusterVersionsServer.Do(req) + case "ClusterAvailableUpgradesClient": + initServer(s, &s.trClusterAvailableUpgradesServer, func() *ClusterAvailableUpgradesServerTransport { + return NewClusterAvailableUpgradesServerTransport(&s.srv.ClusterAvailableUpgradesServer) + }) + resp, err = s.trClusterAvailableUpgradesServer.Do(req) case "ClusterJobsClient": initServer(s, &s.trClusterJobsServer, func() *ClusterJobsServerTransport { return NewClusterJobsServerTransport(&s.srv.ClusterJobsServer) }) resp, err = s.trClusterJobsServer.Do(req) + case "ClusterPoolAvailableUpgradesClient": + initServer(s, &s.trClusterPoolAvailableUpgradesServer, func() *ClusterPoolAvailableUpgradesServerTransport { + return NewClusterPoolAvailableUpgradesServerTransport(&s.srv.ClusterPoolAvailableUpgradesServer) + }) + resp, err = s.trClusterPoolAvailableUpgradesServer.Do(req) case "ClusterPoolsClient": initServer(s, &s.trClusterPoolsServer, func() *ClusterPoolsServerTransport { return NewClusterPoolsServerTransport(&s.srv.ClusterPoolsServer) }) resp, err = s.trClusterPoolsServer.Do(req) diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/fake/time_rfc3339.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/fake/time_rfc3339.go index b0535a7b63e6..81f308b0d343 100644 --- 
a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/fake/time_rfc3339.go +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/fake/time_rfc3339.go @@ -19,12 +19,16 @@ import ( ) // Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. -var tzOffsetRegex = regexp.MustCompile(`(Z|z|\+|-)(\d+:\d+)*"*$`) +var tzOffsetRegex = regexp.MustCompile(`(?:Z|z|\+|-)(?:\d+:\d+)*"*$`) const ( - utcDateTimeJSON = `"2006-01-02T15:04:05.999999999"` - utcDateTime = "2006-01-02T15:04:05.999999999" - dateTimeJSON = `"` + time.RFC3339Nano + `"` + utcDateTime = "2006-01-02T15:04:05.999999999" + utcDateTimeJSON = `"` + utcDateTime + `"` + utcDateTimeNoT = "2006-01-02 15:04:05.999999999" + utcDateTimeJSONNoT = `"` + utcDateTimeNoT + `"` + dateTimeNoT = `2006-01-02 15:04:05.999999999Z07:00` + dateTimeJSON = `"` + time.RFC3339Nano + `"` + dateTimeJSONNoT = `"` + dateTimeNoT + `"` ) type dateTimeRFC3339 time.Time @@ -40,17 +44,33 @@ func (t dateTimeRFC3339) MarshalText() ([]byte, error) { } func (t *dateTimeRFC3339) UnmarshalJSON(data []byte) error { - layout := utcDateTimeJSON - if tzOffsetRegex.Match(data) { + tzOffset := tzOffsetRegex.Match(data) + hasT := strings.Contains(string(data), "T") || strings.Contains(string(data), "t") + var layout string + if tzOffset && hasT { layout = dateTimeJSON + } else if tzOffset { + layout = dateTimeJSONNoT + } else if hasT { + layout = utcDateTimeJSON + } else { + layout = utcDateTimeJSONNoT } return t.Parse(layout, string(data)) } func (t *dateTimeRFC3339) UnmarshalText(data []byte) error { - layout := utcDateTime - if tzOffsetRegex.Match(data) { + tzOffset := tzOffsetRegex.Match(data) + hasT := strings.Contains(string(data), "T") || strings.Contains(string(data), "t") + var layout string + if tzOffset && hasT { layout = time.RFC3339Nano + } else if tzOffset { + layout = dateTimeNoT + } else if hasT { + layout = utcDateTime + } else { + layout = utcDateTimeNoT } return t.Parse(layout, 
string(data)) } @@ -61,6 +81,10 @@ func (t *dateTimeRFC3339) Parse(layout, value string) error { return err } +func (t dateTimeRFC3339) String() string { + return time.Time(t).Format(time.RFC3339Nano) +} + func populateDateTimeRFC3339(m map[string]any, k string, t *time.Time) { if t == nil { return @@ -74,7 +98,7 @@ func populateDateTimeRFC3339(m map[string]any, k string, t *time.Time) { } func unpopulateDateTimeRFC3339(data json.RawMessage, fn string, t **time.Time) error { - if data == nil || strings.EqualFold(string(data), "null") { + if data == nil || string(data) == "null" { return nil } var aux dateTimeRFC3339 diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/go.mod b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/go.mod index 361dd63fccc4..07ae1bdbf029 100644 --- a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/go.mod +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/go.mod @@ -4,16 +4,16 @@ go 1.18 require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 ) require ( github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 // indirect - github.com/golang-jwt/jwt/v5 v5.0.0 // indirect - github.com/google/uuid v1.3.1 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect + github.com/golang-jwt/jwt/v5 v5.2.0 // indirect + github.com/google/uuid v1.5.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect - github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect golang.org/x/crypto v0.19.0 // indirect golang.org/x/net v0.21.0 // indirect golang.org/x/sys v0.17.0 
// indirect diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/go.sum b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/go.sum index 0766d184c256..a4a966ae2630 100644 --- a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/go.sum +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/go.sum @@ -1,28 +1,28 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 h1:c4k2FIYIh4xtwqrQwV0Ct1v5+ehlNXj5NI/MWVsiTkQ= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2/go.mod h1:5FDJtLEO/GxwNgUxbwrY3LP0pEoThTQJtk2oysdXHxM= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo= github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= -github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 h1:WpB/QDNLpMw72xHJc34BNNykqSOeEJDAWkhf0u12/Jk= -github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= 
-github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE= -github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= -github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= -github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw= +github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= +github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/interfaces.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/interfaces.go index 7701f45fdf90..60fc150561a1 100644 --- a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/interfaces.go +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/interfaces.go @@ -8,6 +8,15 @@ package armhdinsightcontainers +// ClusterAvailableUpgradePropertiesClassification provides polymorphic access to related types. +// Call the interface's GetClusterAvailableUpgradeProperties() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *ClusterAvailableUpgradeAksPatchUpgradeProperties, *ClusterAvailableUpgradeHotfixUpgradeProperties, *ClusterAvailableUpgradeProperties +type ClusterAvailableUpgradePropertiesClassification interface { + // GetClusterAvailableUpgradeProperties returns the ClusterAvailableUpgradeProperties content of the underlying type. + GetClusterAvailableUpgradeProperties() *ClusterAvailableUpgradeProperties +} + // ClusterJobPropertiesClassification provides polymorphic access to related types. // Call the interface's GetClusterJobProperties() method to access the common type. // Use a type switch to determine the concrete type. The possible types are: @@ -16,3 +25,30 @@ type ClusterJobPropertiesClassification interface { // GetClusterJobProperties returns the ClusterJobProperties content of the underlying type. 
GetClusterJobProperties() *ClusterJobProperties } + +// ClusterPoolAvailableUpgradePropertiesClassification provides polymorphic access to related types. +// Call the interface's GetClusterPoolAvailableUpgradeProperties() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *ClusterPoolAvailableUpgradeAksPatchUpgradeProperties, *ClusterPoolAvailableUpgradeNodeOsUpgradeProperties, *ClusterPoolAvailableUpgradeProperties +type ClusterPoolAvailableUpgradePropertiesClassification interface { + // GetClusterPoolAvailableUpgradeProperties returns the ClusterPoolAvailableUpgradeProperties content of the underlying type. + GetClusterPoolAvailableUpgradeProperties() *ClusterPoolAvailableUpgradeProperties +} + +// ClusterPoolUpgradePropertiesClassification provides polymorphic access to related types. +// Call the interface's GetClusterPoolUpgradeProperties() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *ClusterPoolAKSPatchVersionUpgradeProperties, *ClusterPoolNodeOsImageUpdateProperties, *ClusterPoolUpgradeProperties +type ClusterPoolUpgradePropertiesClassification interface { + // GetClusterPoolUpgradeProperties returns the ClusterPoolUpgradeProperties content of the underlying type. + GetClusterPoolUpgradeProperties() *ClusterPoolUpgradeProperties +} + +// ClusterUpgradePropertiesClassification provides polymorphic access to related types. +// Call the interface's GetClusterUpgradeProperties() method to access the common type. +// Use a type switch to determine the concrete type. The possible types are: +// - *ClusterAKSPatchVersionUpgradeProperties, *ClusterHotfixUpgradeProperties, *ClusterUpgradeProperties +type ClusterUpgradePropertiesClassification interface { + // GetClusterUpgradeProperties returns the ClusterUpgradeProperties content of the underlying type. 
+ GetClusterUpgradeProperties() *ClusterUpgradeProperties +} diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/locations_client.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/locations_client.go index 6051d1c6c781..5344161fb65a 100644 --- a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/locations_client.go +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/locations_client.go @@ -46,7 +46,7 @@ func NewLocationsClient(subscriptionID string, credential azcore.TokenCredential // CheckNameAvailability - Check the availability of the resource name. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2023-06-01-preview +// Generated from API version 2023-11-01-preview // - location - The name of the Azure region. // - nameAvailabilityParameters - The name and type of the resource. // - options - LocationsClientCheckNameAvailabilityOptions contains the optional parameters for the LocationsClient.CheckNameAvailability @@ -89,7 +89,7 @@ func (client *LocationsClient) checkNameAvailabilityCreateRequest(ctx context.Co return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-06-01-preview") + reqQP.Set("api-version", "2023-11-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} if err := runtime.MarshalAsJSON(req, nameAvailabilityParameters); err != nil { diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/locations_client_example_test.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/locations_client_example_test.go index dffc3337b882..9197dcc3449e 100644 --- a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/locations_client_example_test.go +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/locations_client_example_test.go @@ -18,7 +18,7 @@ import ( 
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers" ) -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/7f70e351393addbc31d790a908c994c7c8644d9c/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-06-01-preview/examples/LocationsNameAvailability.json +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-11-01-preview/examples/LocationsNameAvailability.json func ExampleLocationsClient_CheckNameAvailability() { cred, err := azidentity.NewDefaultAzureCredential(nil) if err != nil { diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/models.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/models.go index 8cd738fbe051..ad57b8e9a921 100644 --- a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/models.go +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/models.go @@ -83,6 +83,137 @@ type Cluster struct { Type *string } +// ClusterAKSPatchVersionUpgradeProperties - Properties of upgrading cluster's AKS patch version. +type ClusterAKSPatchVersionUpgradeProperties struct { + // REQUIRED; Type of upgrade. + UpgradeType *ClusterUpgradeType +} + +// GetClusterUpgradeProperties implements the ClusterUpgradePropertiesClassification interface for type ClusterAKSPatchVersionUpgradeProperties. +func (c *ClusterAKSPatchVersionUpgradeProperties) GetClusterUpgradeProperties() *ClusterUpgradeProperties { + return &ClusterUpgradeProperties{ + UpgradeType: c.UpgradeType, + } +} + +// ClusterAccessProfile - Cluster access profile. +type ClusterAccessProfile struct { + // REQUIRED; Whether to create cluster using private IP instead of public IP. This property must be set at create time. 
+ EnableInternalIngress *bool + + // READ-ONLY; Private link service resource ID. Only when enableInternalIngress is true, this property will be returned. + PrivateLinkServiceID *string +} + +// ClusterAvailableUpgrade - Cluster available upgrade. +type ClusterAvailableUpgrade struct { + // Gets or sets the properties. Define cluster upgrade specific properties. + Properties ClusterAvailableUpgradePropertiesClassification + + // READ-ONLY; Fully qualified resource ID for the resource. E.g. "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}" + ID *string + + // READ-ONLY; The name of the resource + Name *string + + // READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. + SystemData *SystemData + + // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string +} + +// ClusterAvailableUpgradeAksPatchUpgradeProperties - Cluster available AKS patch version upgrade. +type ClusterAvailableUpgradeAksPatchUpgradeProperties struct { + // REQUIRED; Type of upgrade. + UpgradeType *ClusterAvailableUpgradeType + + // Current node pool version. + CurrentVersion *string + + // Current AKS version's status: whether it is deprecated or supported + CurrentVersionStatus *CurrentClusterAksVersionStatus + + // Latest available version, which should be equal to AKS control plane version if it's not deprecated. + LatestVersion *string +} + +// GetClusterAvailableUpgradeProperties implements the ClusterAvailableUpgradePropertiesClassification interface for type +// ClusterAvailableUpgradeAksPatchUpgradeProperties. 
+func (c *ClusterAvailableUpgradeAksPatchUpgradeProperties) GetClusterAvailableUpgradeProperties() *ClusterAvailableUpgradeProperties { + return &ClusterAvailableUpgradeProperties{ + UpgradeType: c.UpgradeType, + } +} + +// ClusterAvailableUpgradeHotfixUpgradeProperties - Cluster available hotfix version upgrade. +type ClusterAvailableUpgradeHotfixUpgradeProperties struct { + // REQUIRED; Type of upgrade. + UpgradeType *ClusterAvailableUpgradeType + + // Name of component to be upgraded. + ComponentName *string + + // Created time of current available upgrade version + CreatedTime *time.Time + + // Hotfix version upgrade description. + Description *string + + // Extended properties of current available upgrade version + ExtendedProperties *string + + // Severity of this upgrade. + Severity *Severity + + // Source build number of current cluster component. + SourceBuildNumber *string + + // Source cluster version of current cluster component. + SourceClusterVersion *string + + // Source OSS version of current cluster component. + SourceOssVersion *string + + // Target build number of component to be upgraded. + TargetBuildNumber *string + + // Target cluster version of component to be upgraded. + TargetClusterVersion *string + + // Target OSS version of component to be upgraded. + TargetOssVersion *string +} + +// GetClusterAvailableUpgradeProperties implements the ClusterAvailableUpgradePropertiesClassification interface for type +// ClusterAvailableUpgradeHotfixUpgradeProperties. +func (c *ClusterAvailableUpgradeHotfixUpgradeProperties) GetClusterAvailableUpgradeProperties() *ClusterAvailableUpgradeProperties { + return &ClusterAvailableUpgradeProperties{ + UpgradeType: c.UpgradeType, + } +} + +// ClusterAvailableUpgradeList - Collection of cluster available upgrade. +type ClusterAvailableUpgradeList struct { + // REQUIRED; Collection of Cluster available upgrade. + Value []*ClusterAvailableUpgrade + + // The URL of next result page. 
+ NextLink *string +} + +// ClusterAvailableUpgradeProperties - Cluster available upgrade properties. +type ClusterAvailableUpgradeProperties struct { + // REQUIRED; Type of upgrade. + UpgradeType *ClusterAvailableUpgradeType +} + +// GetClusterAvailableUpgradeProperties implements the ClusterAvailableUpgradePropertiesClassification interface for type +// ClusterAvailableUpgradeProperties. +func (c *ClusterAvailableUpgradeProperties) GetClusterAvailableUpgradeProperties() *ClusterAvailableUpgradeProperties { + return c +} + type ClusterComponentsItem struct { Name *string Version *string @@ -107,6 +238,31 @@ type ClusterConfigFile struct { Values map[string]*string } +// ClusterHotfixUpgradeProperties - Properties of upgrading cluster's hotfix. +type ClusterHotfixUpgradeProperties struct { + // REQUIRED; Type of upgrade. + UpgradeType *ClusterUpgradeType + + // Name of component to be upgraded. + ComponentName *string + + // Target build number of component to be upgraded. + TargetBuildNumber *string + + // Target cluster version of component to be upgraded. + TargetClusterVersion *string + + // Target OSS version of component to be upgraded. + TargetOssVersion *string +} + +// GetClusterUpgradeProperties implements the ClusterUpgradePropertiesClassification interface for type ClusterHotfixUpgradeProperties. +func (c *ClusterHotfixUpgradeProperties) GetClusterUpgradeProperties() *ClusterUpgradeProperties { + return &ClusterUpgradeProperties{ + UpgradeType: c.UpgradeType, + } +} + // ClusterInstanceViewPropertiesStatus - Status of the instance view. type ClusterInstanceViewPropertiesStatus struct { // REQUIRED; The cluster ready status @@ -130,7 +286,7 @@ type ClusterInstanceViewResult struct { // ClusterInstanceViewResultProperties - Properties of the instance view. type ClusterInstanceViewResultProperties struct { - // REQUIRED; List of statuses of relevant services that make up the HDInsight on aks cluster to surface to the customer. 
+ // REQUIRED; List of statuses of relevant services that make up the HDInsight on AKS cluster to surface to the customer. ServiceStatuses []*ServiceStatus // REQUIRED; Status of the instance view. @@ -214,26 +370,11 @@ type ClusterLogAnalyticsProfile struct { // ClusterPatch - The patch for a cluster. type ClusterPatch struct { - // REQUIRED; The geo-location where the resource lives - Location *string - // Define cluster patch specific properties. Properties *ClusterPatchProperties // Resource tags. Tags map[string]*string - - // READ-ONLY; Fully qualified resource ID for the resource. E.g. "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}" - ID *string - - // READ-ONLY; The name of the resource - Name *string - - // READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. - SystemData *SystemData - - // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" - Type *string } // ClusterPatchProperties - Cluster resource patch data. @@ -266,6 +407,110 @@ type ClusterPool struct { Type *string } +// ClusterPoolAKSPatchVersionUpgradeProperties - Properties of upgrading cluster pool's AKS patch version. +type ClusterPoolAKSPatchVersionUpgradeProperties struct { + // REQUIRED; Type of upgrade. + UpgradeType *ClusterPoolUpgradeType + + // Target AKS version. When it's not set, latest version will be used. When upgradeClusterPool is true and upgradeAllClusterNodes + // is false, target version should be greater or equal to current version. + // When upgradeClusterPool is false and upgradeAllClusterNodes is true, target version should be equal to AKS version of cluster + // pool. + TargetAksVersion *string + + // whether upgrade all clusters' nodes. If it's true, upgradeClusterPool should be false. + UpgradeAllClusterNodes *bool + + // whether upgrade cluster pool or not. 
If it's true, upgradeAllClusterNodes should be false. + UpgradeClusterPool *bool +} + +// GetClusterPoolUpgradeProperties implements the ClusterPoolUpgradePropertiesClassification interface for type ClusterPoolAKSPatchVersionUpgradeProperties. +func (c *ClusterPoolAKSPatchVersionUpgradeProperties) GetClusterPoolUpgradeProperties() *ClusterPoolUpgradeProperties { + return &ClusterPoolUpgradeProperties{ + UpgradeType: c.UpgradeType, + } +} + +// ClusterPoolAvailableUpgrade - Cluster pool available upgrade. +type ClusterPoolAvailableUpgrade struct { + // Gets or sets the properties. Define cluster pool upgrade specific properties. + Properties ClusterPoolAvailableUpgradePropertiesClassification + + // READ-ONLY; Fully qualified resource ID for the resource. E.g. "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}" + ID *string + + // READ-ONLY; The name of the resource + Name *string + + // READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. + SystemData *SystemData + + // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string +} + +// ClusterPoolAvailableUpgradeAksPatchUpgradeProperties - Cluster pool available AKS patch version upgrade. +type ClusterPoolAvailableUpgradeAksPatchUpgradeProperties struct { + // REQUIRED; Type of upgrade. + UpgradeType *ClusterPoolAvailableUpgradeType + + // Current AKS version. + CurrentVersion *string + + // Current AKS version's status: whether it is deprecated or supported + CurrentVersionStatus *CurrentClusterPoolAksVersionStatus + + // Latest AKS patch version. + LatestVersion *string +} + +// GetClusterPoolAvailableUpgradeProperties implements the ClusterPoolAvailableUpgradePropertiesClassification interface for +// type ClusterPoolAvailableUpgradeAksPatchUpgradeProperties. 
+func (c *ClusterPoolAvailableUpgradeAksPatchUpgradeProperties) GetClusterPoolAvailableUpgradeProperties() *ClusterPoolAvailableUpgradeProperties { + return &ClusterPoolAvailableUpgradeProperties{ + UpgradeType: c.UpgradeType, + } +} + +// ClusterPoolAvailableUpgradeList - collection of cluster pool available upgrade. +type ClusterPoolAvailableUpgradeList struct { + // REQUIRED; Collection of cluster pool available upgrade. + Value []*ClusterPoolAvailableUpgrade + + // The Url of next result page. + NextLink *string +} + +// ClusterPoolAvailableUpgradeNodeOsUpgradeProperties - Cluster pool available node OS update. +type ClusterPoolAvailableUpgradeNodeOsUpgradeProperties struct { + // REQUIRED; Type of upgrade. + UpgradeType *ClusterPoolAvailableUpgradeType + + // The latest node OS version. + LatestVersion *string +} + +// GetClusterPoolAvailableUpgradeProperties implements the ClusterPoolAvailableUpgradePropertiesClassification interface for +// type ClusterPoolAvailableUpgradeNodeOsUpgradeProperties. +func (c *ClusterPoolAvailableUpgradeNodeOsUpgradeProperties) GetClusterPoolAvailableUpgradeProperties() *ClusterPoolAvailableUpgradeProperties { + return &ClusterPoolAvailableUpgradeProperties{ + UpgradeType: c.UpgradeType, + } +} + +// ClusterPoolAvailableUpgradeProperties - Cluster pool available upgrade properties. +type ClusterPoolAvailableUpgradeProperties struct { + // REQUIRED; Type of upgrade. + UpgradeType *ClusterPoolAvailableUpgradeType +} + +// GetClusterPoolAvailableUpgradeProperties implements the ClusterPoolAvailableUpgradePropertiesClassification interface for +// type ClusterPoolAvailableUpgradeProperties. +func (c *ClusterPoolAvailableUpgradeProperties) GetClusterPoolAvailableUpgradeProperties() *ClusterPoolAvailableUpgradeProperties { + return c +} + // ClusterPoolListResult - The list cluster pools operation response. type ClusterPoolListResult struct { // The list of cluster pools. 
@@ -275,6 +520,19 @@ type ClusterPoolListResult struct { NextLink *string } +// ClusterPoolNodeOsImageUpdateProperties - Properties of upgrading cluster pool's AKS patch version. +type ClusterPoolNodeOsImageUpdateProperties struct { + // REQUIRED; Type of upgrade. + UpgradeType *ClusterPoolUpgradeType +} + +// GetClusterPoolUpgradeProperties implements the ClusterPoolUpgradePropertiesClassification interface for type ClusterPoolNodeOsImageUpdateProperties. +func (c *ClusterPoolNodeOsImageUpdateProperties) GetClusterPoolUpgradeProperties() *ClusterPoolUpgradeProperties { + return &ClusterPoolUpgradeProperties{ + UpgradeType: c.UpgradeType, + } +} + // ClusterPoolResourceProperties - Cluster pool resource properties. type ClusterPoolResourceProperties struct { // REQUIRED; CLuster pool compute profile. @@ -352,6 +610,36 @@ type ClusterPoolResourcePropertiesLogAnalyticsProfile struct { type ClusterPoolResourcePropertiesNetworkProfile struct { // REQUIRED; Cluster pool subnet resource id. SubnetID *string + + // IP ranges are specified in CIDR format, e.g. 137.117.106.88/29. This feature is not compatible with private AKS clusters. + // So you cannot set enablePrivateApiServer to true and + // apiServerAuthorizedIpRanges at the same time. + APIServerAuthorizedIPRanges []*string + + // ClusterPool is based on AKS cluster. AKS cluster exposes the API server to public internet by default. If you set this + // property to true, a private AKS cluster will be created, and it will use private + // apiserver, which is not exposed to public internet. + EnablePrivateAPIServer *bool + + // This can only be set at cluster pool creation time and cannot be changed later. + OutboundType *OutboundType +} + +// ClusterPoolUpgrade - Cluster Pool Upgrade. +type ClusterPoolUpgrade struct { + // REQUIRED; Properties of upgrading cluster pool. + Properties ClusterPoolUpgradePropertiesClassification +} + +// ClusterPoolUpgradeProperties - Properties of upgrading cluster pool. 
+type ClusterPoolUpgradeProperties struct { + // REQUIRED; Type of upgrade. + UpgradeType *ClusterPoolUpgradeType +} + +// GetClusterPoolUpgradeProperties implements the ClusterPoolUpgradePropertiesClassification interface for type ClusterPoolUpgradeProperties. +func (c *ClusterPoolUpgradeProperties) GetClusterPoolUpgradeProperties() *ClusterPoolUpgradeProperties { + return c } // ClusterPoolVersion - Available cluster pool version. @@ -401,20 +689,23 @@ type ClusterProfile struct { // REQUIRED; Version with 3/4 part. ClusterVersion *string - // REQUIRED; Identity Profile with details of an MSI. - IdentityProfile *IdentityProfile - // REQUIRED; Version with three part. OssVersion *string // This is the Autoscale profile for the cluster. This will allow customer to create cluster enabled with Autoscale. AutoscaleProfile *AutoscaleProfile + // Cluster access profile. + ClusterAccessProfile *ClusterAccessProfile + // The Flink cluster profile. FlinkProfile *FlinkProfile - // Kafka cluster profile. - KafkaProfile map[string]any + // This property is required by Trino, Spark and Flink cluster but is optional for Kafka cluster. + IdentityProfile *IdentityProfile + + // The Kafka cluster profile. + KafkaProfile *KafkaProfile // LLAP cluster profile. LlapProfile map[string]any @@ -425,6 +716,12 @@ type ClusterProfile struct { // Cluster Prometheus profile. PrometheusProfile *ClusterPrometheusProfile + // Cluster Ranger plugin profile. + RangerPluginProfile *ClusterRangerPluginProfile + + // The ranger cluster profile. + RangerProfile *RangerProfile + // Ssh profile for the cluster. SSHProfile *SSHProfile @@ -459,6 +756,12 @@ type ClusterPrometheusProfile struct { Enabled *bool } +// ClusterRangerPluginProfile - Cluster Ranger plugin profile. +type ClusterRangerPluginProfile struct { + // REQUIRED; Enable Ranger for cluster or not. + Enabled *bool +} + // ClusterResizeData - The parameters for resizing a cluster. 
type ClusterResizeData struct { // REQUIRED; The geo-location where the resource lives @@ -528,6 +831,21 @@ type ClusterServiceConfigsProfile struct { ServiceName *string } +// ClusterUpgrade - Cluster Upgrade. +type ClusterUpgrade struct { + // REQUIRED; Properties of upgrading cluster. + Properties ClusterUpgradePropertiesClassification +} + +// ClusterUpgradeProperties - Properties of upgrading cluster. +type ClusterUpgradeProperties struct { + // REQUIRED; Type of upgrade. + UpgradeType *ClusterUpgradeType +} + +// GetClusterUpgradeProperties implements the ClusterUpgradePropertiesClassification interface for type ClusterUpgradeProperties. +func (c *ClusterUpgradeProperties) GetClusterUpgradeProperties() *ClusterUpgradeProperties { return c } + // ClusterVersion - Available cluster version. type ClusterVersion struct { // Cluster version properties. @@ -614,6 +932,19 @@ type ConnectivityProfile struct { type ConnectivityProfileWeb struct { // REQUIRED; Web connectivity endpoint. Fqdn *string + + // Private web connectivity endpoint. This property will only be returned when enableInternalIngress is true. + PrivateFqdn *string +} + +// DiskStorageProfile - Kafka disk storage profile. +type DiskStorageProfile struct { + // REQUIRED; Managed Disk size in GB. The maximum supported disk size for Standard and Premium HDD/SSD is 32TB, except for + // Premium SSD v2, which supports up to 64TB. + DataDiskSize *int32 + + // REQUIRED; Managed Disk Type. + DataDiskType *DataDiskType } // FlinkCatalogOptions - Flink cluster catalog options. @@ -624,21 +955,47 @@ type FlinkCatalogOptions struct { // FlinkHiveCatalogOption - Hive Catalog Option for Flink cluster. type FlinkHiveCatalogOption struct { - // REQUIRED; Secret reference name from secretsProfile.secrets containing password for database connection. - MetastoreDbConnectionPasswordSecret *string - // REQUIRED; Connection string for hive metastore database. 
MetastoreDbConnectionURL *string - // REQUIRED; User name for database connection. + // The authentication mode to connect to your Hive metastore database. More details: + // https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization + MetastoreDbConnectionAuthenticationMode *MetastoreDbConnectionAuthenticationMode + + // Secret reference name from secretsProfile.secrets containing password for database connection. + MetastoreDbConnectionPasswordSecret *string + + // User name for database connection. MetastoreDbConnectionUserName *string } +// FlinkJobProfile - Job specifications for flink clusters in application deployment mode. The specification is immutable +// even if job properties are changed by calling the RunJob API, please use the ListJob API to get the +// latest job information. +type FlinkJobProfile struct { + // REQUIRED; A string property that represents the name of the job JAR. + JarName *string + + // REQUIRED; A string property that specifies the directory where the job JAR is located. + JobJarDirectory *string + + // REQUIRED; A string property that indicates the upgrade mode to be performed on the Flink job. It can have one of the following + // enum values => STATELESSUPDATE, UPDATE, LASTSTATE_UPDATE. + UpgradeMode *UpgradeMode + + // A string property representing additional JVM arguments for the Flink job. It should be space separated value. + Args *string + + // A string property that specifies the entry class for the Flink job. If not specified, the entry point is auto-detected + // from the flink job jar package. + EntryClass *string + + // A string property that represents the name of the savepoint for the Flink job + SavePointName *string +} + // FlinkJobProperties - Properties of flink job. type FlinkJobProperties struct { - // REQUIRED; Name of job - JobName *string - // REQUIRED; Type of cluster job. 
JobType *JobType @@ -664,6 +1021,12 @@ type FlinkJobProperties struct { // A string property that specifies the directory where the job JAR is located. JobJarDirectory *string + // Name of job + JobName *string + + // Run id of job + RunID *string + // A string property that represents the name of the savepoint for the Flink job SavePointName *string @@ -704,9 +1067,18 @@ type FlinkProfile struct { // Flink cluster catalog options. CatalogOptions *FlinkCatalogOptions + // A string property that indicates the deployment mode of Flink cluster. It can have one of the following enum values => + // Application, Session. Default value is Session + DeploymentMode *DeploymentMode + // History Server container/ process CPU and memory requirements HistoryServer *ComputeResourceDefinition + // Job specifications for flink clusters in application deployment mode. The specification is immutable even if job properties + // are changed by calling the RunJob API, please use the ListJob API to get the + // latest job information. + JobSpec *FlinkJobProfile + // The number of task managers. NumReplicas *int32 } @@ -725,17 +1097,21 @@ type HiveCatalogOption struct { // REQUIRED; Name of trino catalog which should use specified hive metastore. CatalogName *string - // REQUIRED; Secret reference name from secretsProfile.secrets containing password for database connection. - MetastoreDbConnectionPasswordSecret *string - // REQUIRED; Connection string for hive metastore database. MetastoreDbConnectionURL *string - // REQUIRED; User name for database connection. - MetastoreDbConnectionUserName *string - // REQUIRED; Metastore root directory URI, format: abfs[s]://@.dfs.core.windows.net/. More details: https://docs.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-introduction-abfs-uri MetastoreWarehouseDir *string + + // The authentication mode to connect to your Hive metastore database. 
More details: + // https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization + MetastoreDbConnectionAuthenticationMode *MetastoreDbConnectionAuthenticationMode + + // Secret reference name from secretsProfile.secrets containing password for database connection. + MetastoreDbConnectionPasswordSecret *string + + // User name for database connection. + MetastoreDbConnectionUserName *string } // IdentityProfile - Identity Profile with details of an MSI. @@ -750,6 +1126,36 @@ type IdentityProfile struct { MsiResourceID *string } +// KafkaConnectivityEndpoints - Kafka bootstrap server and broker related connectivity endpoints. +type KafkaConnectivityEndpoints struct { + // bootstrap server connectivity endpoint. + BootstrapServerEndpoint *string + + // Kafka broker endpoint list. + BrokerEndpoints []*string +} + +// KafkaProfile - The Kafka cluster profile. +type KafkaProfile struct { + // REQUIRED; Kafka disk storage profile. + DiskStorage *DiskStorageProfile + + // Expose Kafka cluster in KRaft mode. + EnableKRaft *bool + + // Expose worker nodes as public endpoints. + EnablePublicEndpoints *bool + + // Fully qualified path of Azure Storage container used for Tiered Storage. + RemoteStorageURI *string + + // READ-ONLY; Identity of the internal service components inside the Kafka cluster. + ClusterIdentity *IdentityProfile + + // READ-ONLY; Kafka bootstrap server and brokers related connectivity endpoints. + ConnectivityEndpoints *KafkaConnectivityEndpoints +} + // LoadBasedConfig - Profile of load based Autoscale. type LoadBasedConfig struct { // REQUIRED; User needs to set the maximum number of nodes for load based scaling, the load based scaling will use this to @@ -855,10 +1261,73 @@ type OperationListResult struct { Value []*Operation } +// RangerAdminSpec - Specification for the Ranger Admin service. 
+type RangerAdminSpec struct { + // REQUIRED; List of usernames that should be marked as ranger admins. These usernames should match the user principal name + // (UPN) of the respective AAD users. + Admins []*string + + // REQUIRED + Database *RangerAdminSpecDatabase +} + +type RangerAdminSpecDatabase struct { + // REQUIRED; The database URL + Host *string + + // REQUIRED; The database name + Name *string + + // Reference for the database password + PasswordSecretRef *string + + // The name of the database user + Username *string +} + +// RangerAuditSpec - Properties required to describe audit log storage. +type RangerAuditSpec struct { + // Azure storage location of the blobs. MSI should have read/write access to this Storage account. + StorageAccount *string +} + +// RangerProfile - The ranger cluster profile. +type RangerProfile struct { + // REQUIRED; Specification for the Ranger Admin service. + RangerAdmin *RangerAdminSpec + + // REQUIRED; Specification for the Ranger Usersync service + RangerUsersync *RangerUsersyncSpec + + // Properties required to describe audit log storage. + RangerAudit *RangerAuditSpec +} + +// RangerUsersyncSpec - Specification for the Ranger Usersync service +type RangerUsersyncSpec struct { + // Denotes whether usersync service should be enabled + Enabled *bool + + // List of groups that should be synced. These group names should match the object id of the respective AAD groups. + Groups []*string + + // User & groups can be synced automatically or via a static list that's refreshed. + Mode *RangerUsersyncMode + + // Azure storage location of a mapping file that lists user & group associations. + UserMappingLocation *string + + // List of user names that should be synced. These usernames should match the User principal name of the respective AAD users. + Users []*string +} + // SSHConnectivityEndpoint - SSH connectivity endpoint details. type SSHConnectivityEndpoint struct { // REQUIRED; SSH connectivity endpoint. 
Endpoint *string + + // Private SSH connectivity endpoint. This property will only be returned when enableInternalIngress is true. + PrivateSSHEndpoint *string } // SSHProfile - Ssh profile for the cluster. @@ -1016,7 +1485,7 @@ type ServiceConfigResultProperties struct { Type *string } -// ServiceStatus - Describes the status of a service of a HDInsight on aks cluster. +// ServiceStatus - Describes the status of a service of a HDInsight on AKS cluster. type ServiceStatus struct { // REQUIRED; Kind of the service. E.g. "Zookeeper". Kind *string @@ -1033,16 +1502,20 @@ type SparkMetastoreSpec struct { // REQUIRED; The database name. DbName *string - // REQUIRED; The secret name which contains the database user password. - DbPasswordSecretName *string - // REQUIRED; The database server host. DbServerHost *string - // REQUIRED; The database user name. + // The authentication mode to connect to your Hive metastore database. More details: + // https://learn.microsoft.com/en-us/azure/azure-sql/database/logins-create-manage?view=azuresql#authentication-and-authorization + DbConnectionAuthenticationMode *DbConnectionAuthenticationMode + + // The secret name which contains the database user password. + DbPasswordSecretName *string + + // The database user name. DbUserName *string - // REQUIRED; The key vault resource id. + // The key vault resource id. KeyVaultID *string // The thrift url. @@ -1200,6 +1673,12 @@ type UpdatableClusterProfile struct { // Cluster Prometheus profile. PrometheusProfile *ClusterPrometheusProfile + // Cluster Ranger plugin profile. + RangerPluginProfile *ClusterRangerPluginProfile + + // The ranger cluster profile. + RangerProfile *RangerProfile + // Ssh profile for the cluster. 
SSHProfile *SSHProfile diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/models_serde.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/models_serde.go index 95b16c3e6074..a9145bbf6ae2 100644 --- a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/models_serde.go +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/models_serde.go @@ -202,6 +202,275 @@ func (c *Cluster) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type ClusterAKSPatchVersionUpgradeProperties. +func (c ClusterAKSPatchVersionUpgradeProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["upgradeType"] = ClusterUpgradeTypeAKSPatchUpgrade + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ClusterAKSPatchVersionUpgradeProperties. +func (c *ClusterAKSPatchVersionUpgradeProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "upgradeType": + err = unpopulate(val, "UpgradeType", &c.UpgradeType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ClusterAccessProfile. +func (c ClusterAccessProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "enableInternalIngress", c.EnableInternalIngress) + populate(objectMap, "privateLinkServiceId", c.PrivateLinkServiceID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ClusterAccessProfile. 
+func (c *ClusterAccessProfile) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "enableInternalIngress": + err = unpopulate(val, "EnableInternalIngress", &c.EnableInternalIngress) + delete(rawMsg, key) + case "privateLinkServiceId": + err = unpopulate(val, "PrivateLinkServiceID", &c.PrivateLinkServiceID) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ClusterAvailableUpgrade. +func (c ClusterAvailableUpgrade) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", c.ID) + populate(objectMap, "name", c.Name) + populate(objectMap, "properties", c.Properties) + populate(objectMap, "systemData", c.SystemData) + populate(objectMap, "type", c.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ClusterAvailableUpgrade. 
+func (c *ClusterAvailableUpgrade) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &c.ID) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &c.Name) + delete(rawMsg, key) + case "properties": + c.Properties, err = unmarshalClusterAvailableUpgradePropertiesClassification(val) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &c.SystemData) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &c.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ClusterAvailableUpgradeAksPatchUpgradeProperties. +func (c ClusterAvailableUpgradeAksPatchUpgradeProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "currentVersion", c.CurrentVersion) + populate(objectMap, "currentVersionStatus", c.CurrentVersionStatus) + populate(objectMap, "latestVersion", c.LatestVersion) + objectMap["upgradeType"] = ClusterAvailableUpgradeTypeAKSPatchUpgrade + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ClusterAvailableUpgradeAksPatchUpgradeProperties. 
+func (c *ClusterAvailableUpgradeAksPatchUpgradeProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "currentVersion": + err = unpopulate(val, "CurrentVersion", &c.CurrentVersion) + delete(rawMsg, key) + case "currentVersionStatus": + err = unpopulate(val, "CurrentVersionStatus", &c.CurrentVersionStatus) + delete(rawMsg, key) + case "latestVersion": + err = unpopulate(val, "LatestVersion", &c.LatestVersion) + delete(rawMsg, key) + case "upgradeType": + err = unpopulate(val, "UpgradeType", &c.UpgradeType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ClusterAvailableUpgradeHotfixUpgradeProperties. +func (c ClusterAvailableUpgradeHotfixUpgradeProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "componentName", c.ComponentName) + populateDateTimeRFC3339(objectMap, "createdTime", c.CreatedTime) + populate(objectMap, "description", c.Description) + populate(objectMap, "extendedProperties", c.ExtendedProperties) + populate(objectMap, "severity", c.Severity) + populate(objectMap, "sourceBuildNumber", c.SourceBuildNumber) + populate(objectMap, "sourceClusterVersion", c.SourceClusterVersion) + populate(objectMap, "sourceOssVersion", c.SourceOssVersion) + populate(objectMap, "targetBuildNumber", c.TargetBuildNumber) + populate(objectMap, "targetClusterVersion", c.TargetClusterVersion) + populate(objectMap, "targetOssVersion", c.TargetOssVersion) + objectMap["upgradeType"] = ClusterAvailableUpgradeTypeHotfixUpgrade + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type 
ClusterAvailableUpgradeHotfixUpgradeProperties. +func (c *ClusterAvailableUpgradeHotfixUpgradeProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "componentName": + err = unpopulate(val, "ComponentName", &c.ComponentName) + delete(rawMsg, key) + case "createdTime": + err = unpopulateDateTimeRFC3339(val, "CreatedTime", &c.CreatedTime) + delete(rawMsg, key) + case "description": + err = unpopulate(val, "Description", &c.Description) + delete(rawMsg, key) + case "extendedProperties": + err = unpopulate(val, "ExtendedProperties", &c.ExtendedProperties) + delete(rawMsg, key) + case "severity": + err = unpopulate(val, "Severity", &c.Severity) + delete(rawMsg, key) + case "sourceBuildNumber": + err = unpopulate(val, "SourceBuildNumber", &c.SourceBuildNumber) + delete(rawMsg, key) + case "sourceClusterVersion": + err = unpopulate(val, "SourceClusterVersion", &c.SourceClusterVersion) + delete(rawMsg, key) + case "sourceOssVersion": + err = unpopulate(val, "SourceOssVersion", &c.SourceOssVersion) + delete(rawMsg, key) + case "targetBuildNumber": + err = unpopulate(val, "TargetBuildNumber", &c.TargetBuildNumber) + delete(rawMsg, key) + case "targetClusterVersion": + err = unpopulate(val, "TargetClusterVersion", &c.TargetClusterVersion) + delete(rawMsg, key) + case "targetOssVersion": + err = unpopulate(val, "TargetOssVersion", &c.TargetOssVersion) + delete(rawMsg, key) + case "upgradeType": + err = unpopulate(val, "UpgradeType", &c.UpgradeType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ClusterAvailableUpgradeList. 
+func (c ClusterAvailableUpgradeList) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", c.NextLink) + populate(objectMap, "value", c.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ClusterAvailableUpgradeList. +func (c *ClusterAvailableUpgradeList) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &c.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &c.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ClusterAvailableUpgradeProperties. +func (c ClusterAvailableUpgradeProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["upgradeType"] = c.UpgradeType + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ClusterAvailableUpgradeProperties. +func (c *ClusterAvailableUpgradeProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "upgradeType": + err = unpopulate(val, "UpgradeType", &c.UpgradeType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type ClusterComponentsItem. 
func (c ClusterComponentsItem) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -276,6 +545,49 @@ func (c *ClusterConfigFile) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type ClusterHotfixUpgradeProperties. +func (c ClusterHotfixUpgradeProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "componentName", c.ComponentName) + populate(objectMap, "targetBuildNumber", c.TargetBuildNumber) + populate(objectMap, "targetClusterVersion", c.TargetClusterVersion) + populate(objectMap, "targetOssVersion", c.TargetOssVersion) + objectMap["upgradeType"] = ClusterUpgradeTypeHotfixUpgrade + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ClusterHotfixUpgradeProperties. +func (c *ClusterHotfixUpgradeProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "componentName": + err = unpopulate(val, "ComponentName", &c.ComponentName) + delete(rawMsg, key) + case "targetBuildNumber": + err = unpopulate(val, "TargetBuildNumber", &c.TargetBuildNumber) + delete(rawMsg, key) + case "targetClusterVersion": + err = unpopulate(val, "TargetClusterVersion", &c.TargetClusterVersion) + delete(rawMsg, key) + case "targetOssVersion": + err = unpopulate(val, "TargetOssVersion", &c.TargetOssVersion) + delete(rawMsg, key) + case "upgradeType": + err = unpopulate(val, "UpgradeType", &c.UpgradeType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type ClusterInstanceViewPropertiesStatus. 
func (c ClusterInstanceViewPropertiesStatus) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -605,13 +917,8 @@ func (c *ClusterLogAnalyticsProfile) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type ClusterPatch. func (c ClusterPatch) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) - populate(objectMap, "id", c.ID) - populate(objectMap, "location", c.Location) - populate(objectMap, "name", c.Name) populate(objectMap, "properties", c.Properties) - populate(objectMap, "systemData", c.SystemData) populate(objectMap, "tags", c.Tags) - populate(objectMap, "type", c.Type) return json.Marshal(objectMap) } @@ -624,27 +931,12 @@ func (c *ClusterPatch) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { - case "id": - err = unpopulate(val, "ID", &c.ID) - delete(rawMsg, key) - case "location": - err = unpopulate(val, "Location", &c.Location) - delete(rawMsg, key) - case "name": - err = unpopulate(val, "Name", &c.Name) - delete(rawMsg, key) case "properties": err = unpopulate(val, "Properties", &c.Properties) delete(rawMsg, key) - case "systemData": - err = unpopulate(val, "SystemData", &c.SystemData) - delete(rawMsg, key) case "tags": err = unpopulate(val, "Tags", &c.Tags) delete(rawMsg, key) - case "type": - err = unpopulate(val, "Type", &c.Type) - delete(rawMsg, key) } if err != nil { return fmt.Errorf("unmarshalling type %T: %v", c, err) @@ -731,6 +1023,216 @@ func (c *ClusterPool) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type ClusterPoolAKSPatchVersionUpgradeProperties. 
+func (c ClusterPoolAKSPatchVersionUpgradeProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "targetAksVersion", c.TargetAksVersion) + populate(objectMap, "upgradeAllClusterNodes", c.UpgradeAllClusterNodes) + populate(objectMap, "upgradeClusterPool", c.UpgradeClusterPool) + objectMap["upgradeType"] = ClusterPoolUpgradeTypeAKSPatchUpgrade + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ClusterPoolAKSPatchVersionUpgradeProperties. +func (c *ClusterPoolAKSPatchVersionUpgradeProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "targetAksVersion": + err = unpopulate(val, "TargetAksVersion", &c.TargetAksVersion) + delete(rawMsg, key) + case "upgradeAllClusterNodes": + err = unpopulate(val, "UpgradeAllClusterNodes", &c.UpgradeAllClusterNodes) + delete(rawMsg, key) + case "upgradeClusterPool": + err = unpopulate(val, "UpgradeClusterPool", &c.UpgradeClusterPool) + delete(rawMsg, key) + case "upgradeType": + err = unpopulate(val, "UpgradeType", &c.UpgradeType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ClusterPoolAvailableUpgrade. +func (c ClusterPoolAvailableUpgrade) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", c.ID) + populate(objectMap, "name", c.Name) + populate(objectMap, "properties", c.Properties) + populate(objectMap, "systemData", c.SystemData) + populate(objectMap, "type", c.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ClusterPoolAvailableUpgrade. 
+func (c *ClusterPoolAvailableUpgrade) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &c.ID) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &c.Name) + delete(rawMsg, key) + case "properties": + c.Properties, err = unmarshalClusterPoolAvailableUpgradePropertiesClassification(val) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &c.SystemData) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &c.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ClusterPoolAvailableUpgradeAksPatchUpgradeProperties. +func (c ClusterPoolAvailableUpgradeAksPatchUpgradeProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "currentVersion", c.CurrentVersion) + populate(objectMap, "currentVersionStatus", c.CurrentVersionStatus) + populate(objectMap, "latestVersion", c.LatestVersion) + objectMap["upgradeType"] = ClusterPoolAvailableUpgradeTypeAKSPatchUpgrade + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ClusterPoolAvailableUpgradeAksPatchUpgradeProperties. 
+func (c *ClusterPoolAvailableUpgradeAksPatchUpgradeProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "currentVersion": + err = unpopulate(val, "CurrentVersion", &c.CurrentVersion) + delete(rawMsg, key) + case "currentVersionStatus": + err = unpopulate(val, "CurrentVersionStatus", &c.CurrentVersionStatus) + delete(rawMsg, key) + case "latestVersion": + err = unpopulate(val, "LatestVersion", &c.LatestVersion) + delete(rawMsg, key) + case "upgradeType": + err = unpopulate(val, "UpgradeType", &c.UpgradeType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ClusterPoolAvailableUpgradeList. +func (c ClusterPoolAvailableUpgradeList) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", c.NextLink) + populate(objectMap, "value", c.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ClusterPoolAvailableUpgradeList. +func (c *ClusterPoolAvailableUpgradeList) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &c.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &c.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ClusterPoolAvailableUpgradeNodeOsUpgradeProperties. 
+func (c ClusterPoolAvailableUpgradeNodeOsUpgradeProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "latestVersion", c.LatestVersion) + objectMap["upgradeType"] = ClusterPoolAvailableUpgradeTypeNodeOsUpgrade + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ClusterPoolAvailableUpgradeNodeOsUpgradeProperties. +func (c *ClusterPoolAvailableUpgradeNodeOsUpgradeProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "latestVersion": + err = unpopulate(val, "LatestVersion", &c.LatestVersion) + delete(rawMsg, key) + case "upgradeType": + err = unpopulate(val, "UpgradeType", &c.UpgradeType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ClusterPoolAvailableUpgradeProperties. +func (c ClusterPoolAvailableUpgradeProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["upgradeType"] = c.UpgradeType + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ClusterPoolAvailableUpgradeProperties. 
+func (c *ClusterPoolAvailableUpgradeProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "upgradeType": + err = unpopulate(val, "UpgradeType", &c.UpgradeType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type ClusterPoolListResult. func (c ClusterPoolListResult) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -762,6 +1264,33 @@ func (c *ClusterPoolListResult) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type ClusterPoolNodeOsImageUpdateProperties. +func (c ClusterPoolNodeOsImageUpdateProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["upgradeType"] = ClusterPoolUpgradeTypeNodeOsUpgrade + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ClusterPoolNodeOsImageUpdateProperties. +func (c *ClusterPoolNodeOsImageUpdateProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "upgradeType": + err = unpopulate(val, "UpgradeType", &c.UpgradeType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type ClusterPoolResourceProperties. 
func (c ClusterPoolResourceProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -952,6 +1481,9 @@ func (c *ClusterPoolResourcePropertiesLogAnalyticsProfile) UnmarshalJSON(data [] // MarshalJSON implements the json.Marshaller interface for type ClusterPoolResourcePropertiesNetworkProfile. func (c ClusterPoolResourcePropertiesNetworkProfile) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) + populate(objectMap, "apiServerAuthorizedIpRanges", c.APIServerAuthorizedIPRanges) + populate(objectMap, "enablePrivateApiServer", c.EnablePrivateAPIServer) + populate(objectMap, "outboundType", c.OutboundType) populate(objectMap, "subnetId", c.SubnetID) return json.Marshal(objectMap) } @@ -965,6 +1497,15 @@ func (c *ClusterPoolResourcePropertiesNetworkProfile) UnmarshalJSON(data []byte) for key, val := range rawMsg { var err error switch key { + case "apiServerAuthorizedIpRanges": + err = unpopulate(val, "APIServerAuthorizedIPRanges", &c.APIServerAuthorizedIPRanges) + delete(rawMsg, key) + case "enablePrivateApiServer": + err = unpopulate(val, "EnablePrivateAPIServer", &c.EnablePrivateAPIServer) + delete(rawMsg, key) + case "outboundType": + err = unpopulate(val, "OutboundType", &c.OutboundType) + delete(rawMsg, key) case "subnetId": err = unpopulate(val, "SubnetID", &c.SubnetID) delete(rawMsg, key) @@ -976,6 +1517,60 @@ func (c *ClusterPoolResourcePropertiesNetworkProfile) UnmarshalJSON(data []byte) return nil } +// MarshalJSON implements the json.Marshaller interface for type ClusterPoolUpgrade. +func (c ClusterPoolUpgrade) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "properties", c.Properties) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ClusterPoolUpgrade. 
+func (c *ClusterPoolUpgrade) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "properties": + c.Properties, err = unmarshalClusterPoolUpgradePropertiesClassification(val) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ClusterPoolUpgradeProperties. +func (c ClusterPoolUpgradeProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["upgradeType"] = c.UpgradeType + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ClusterPoolUpgradeProperties. +func (c *ClusterPoolUpgradeProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "upgradeType": + err = unpopulate(val, "UpgradeType", &c.UpgradeType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type ClusterPoolVersion. 
func (c ClusterPoolVersion) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -1090,6 +1685,7 @@ func (c ClusterProfile) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "authorizationProfile", c.AuthorizationProfile) populate(objectMap, "autoscaleProfile", c.AutoscaleProfile) + populate(objectMap, "clusterAccessProfile", c.ClusterAccessProfile) populate(objectMap, "clusterVersion", c.ClusterVersion) populate(objectMap, "components", c.Components) populate(objectMap, "connectivityProfile", c.ConnectivityProfile) @@ -1100,6 +1696,8 @@ func (c ClusterProfile) MarshalJSON() ([]byte, error) { populate(objectMap, "logAnalyticsProfile", c.LogAnalyticsProfile) populate(objectMap, "ossVersion", c.OssVersion) populate(objectMap, "prometheusProfile", c.PrometheusProfile) + populate(objectMap, "rangerPluginProfile", c.RangerPluginProfile) + populate(objectMap, "rangerProfile", c.RangerProfile) populate(objectMap, "sshProfile", c.SSHProfile) populate(objectMap, "scriptActionProfiles", c.ScriptActionProfiles) populate(objectMap, "secretsProfile", c.SecretsProfile) @@ -1125,6 +1723,9 @@ func (c *ClusterProfile) UnmarshalJSON(data []byte) error { case "autoscaleProfile": err = unpopulate(val, "AutoscaleProfile", &c.AutoscaleProfile) delete(rawMsg, key) + case "clusterAccessProfile": + err = unpopulate(val, "ClusterAccessProfile", &c.ClusterAccessProfile) + delete(rawMsg, key) case "clusterVersion": err = unpopulate(val, "ClusterVersion", &c.ClusterVersion) delete(rawMsg, key) @@ -1155,6 +1756,12 @@ func (c *ClusterProfile) UnmarshalJSON(data []byte) error { case "prometheusProfile": err = unpopulate(val, "PrometheusProfile", &c.PrometheusProfile) delete(rawMsg, key) + case "rangerPluginProfile": + err = unpopulate(val, "RangerPluginProfile", &c.RangerPluginProfile) + delete(rawMsg, key) + case "rangerProfile": + err = unpopulate(val, "RangerProfile", &c.RangerProfile) + delete(rawMsg, key) case "sshProfile": err = 
unpopulate(val, "SSHProfile", &c.SSHProfile) delete(rawMsg, key) @@ -1211,6 +1818,33 @@ func (c *ClusterPrometheusProfile) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type ClusterRangerPluginProfile. +func (c ClusterRangerPluginProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "enabled", c.Enabled) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ClusterRangerPluginProfile. +func (c *ClusterRangerPluginProfile) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "enabled": + err = unpopulate(val, "Enabled", &c.Enabled) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type ClusterResizeData. func (c ClusterResizeData) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -1398,6 +2032,60 @@ func (c *ClusterServiceConfigsProfile) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type ClusterUpgrade. +func (c ClusterUpgrade) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "properties", c.Properties) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ClusterUpgrade. 
+func (c *ClusterUpgrade) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "properties": + c.Properties, err = unmarshalClusterUpgradePropertiesClassification(val) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ClusterUpgradeProperties. +func (c ClusterUpgradeProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + objectMap["upgradeType"] = c.UpgradeType + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ClusterUpgradeProperties. +func (c *ClusterUpgradeProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "upgradeType": + err = unpopulate(val, "UpgradeType", &c.UpgradeType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type ClusterVersion. 
func (c ClusterVersion) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -1643,6 +2331,7 @@ func (c *ConnectivityProfile) UnmarshalJSON(data []byte) error { func (c ConnectivityProfileWeb) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "fqdn", c.Fqdn) + populate(objectMap, "privateFqdn", c.PrivateFqdn) return json.Marshal(objectMap) } @@ -1658,6 +2347,9 @@ func (c *ConnectivityProfileWeb) UnmarshalJSON(data []byte) error { case "fqdn": err = unpopulate(val, "Fqdn", &c.Fqdn) delete(rawMsg, key) + case "privateFqdn": + err = unpopulate(val, "PrivateFqdn", &c.PrivateFqdn) + delete(rawMsg, key) } if err != nil { return fmt.Errorf("unmarshalling type %T: %v", c, err) @@ -1666,6 +2358,37 @@ func (c *ConnectivityProfileWeb) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type DiskStorageProfile. +func (d DiskStorageProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "dataDiskSize", d.DataDiskSize) + populate(objectMap, "dataDiskType", d.DataDiskType) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DiskStorageProfile. +func (d *DiskStorageProfile) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "dataDiskSize": + err = unpopulate(val, "DataDiskSize", &d.DataDiskSize) + delete(rawMsg, key) + case "dataDiskType": + err = unpopulate(val, "DataDiskType", &d.DataDiskType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type FlinkCatalogOptions. 
func (f FlinkCatalogOptions) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -1696,6 +2419,7 @@ func (f *FlinkCatalogOptions) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type FlinkHiveCatalogOption. func (f FlinkHiveCatalogOption) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) + populate(objectMap, "metastoreDbConnectionAuthenticationMode", f.MetastoreDbConnectionAuthenticationMode) populate(objectMap, "metastoreDbConnectionPasswordSecret", f.MetastoreDbConnectionPasswordSecret) populate(objectMap, "metastoreDbConnectionURL", f.MetastoreDbConnectionURL) populate(objectMap, "metastoreDbConnectionUserName", f.MetastoreDbConnectionUserName) @@ -1711,6 +2435,9 @@ func (f *FlinkHiveCatalogOption) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { + case "metastoreDbConnectionAuthenticationMode": + err = unpopulate(val, "MetastoreDbConnectionAuthenticationMode", &f.MetastoreDbConnectionAuthenticationMode) + delete(rawMsg, key) case "metastoreDbConnectionPasswordSecret": err = unpopulate(val, "MetastoreDbConnectionPasswordSecret", &f.MetastoreDbConnectionPasswordSecret) delete(rawMsg, key) @@ -1728,6 +2455,53 @@ func (f *FlinkHiveCatalogOption) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type FlinkJobProfile. +func (f FlinkJobProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "args", f.Args) + populate(objectMap, "entryClass", f.EntryClass) + populate(objectMap, "jarName", f.JarName) + populate(objectMap, "jobJarDirectory", f.JobJarDirectory) + populate(objectMap, "savePointName", f.SavePointName) + populate(objectMap, "upgradeMode", f.UpgradeMode) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type FlinkJobProfile. 
+func (f *FlinkJobProfile) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "args": + err = unpopulate(val, "Args", &f.Args) + delete(rawMsg, key) + case "entryClass": + err = unpopulate(val, "EntryClass", &f.EntryClass) + delete(rawMsg, key) + case "jarName": + err = unpopulate(val, "JarName", &f.JarName) + delete(rawMsg, key) + case "jobJarDirectory": + err = unpopulate(val, "JobJarDirectory", &f.JobJarDirectory) + delete(rawMsg, key) + case "savePointName": + err = unpopulate(val, "SavePointName", &f.SavePointName) + delete(rawMsg, key) + case "upgradeMode": + err = unpopulate(val, "UpgradeMode", &f.UpgradeMode) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type FlinkJobProperties. 
func (f FlinkJobProperties) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -1743,6 +2517,7 @@ func (f FlinkJobProperties) MarshalJSON() ([]byte, error) { populate(objectMap, "jobOutput", f.JobOutput) objectMap["jobType"] = JobTypeFlinkJob populate(objectMap, "lastSavePoint", f.LastSavePoint) + populate(objectMap, "runId", f.RunID) populate(objectMap, "savePointName", f.SavePointName) populate(objectMap, "status", f.Status) return json.Marshal(objectMap) @@ -1793,6 +2568,9 @@ func (f *FlinkJobProperties) UnmarshalJSON(data []byte) error { case "lastSavePoint": err = unpopulate(val, "LastSavePoint", &f.LastSavePoint) delete(rawMsg, key) + case "runId": + err = unpopulate(val, "RunID", &f.RunID) + delete(rawMsg, key) case "savePointName": err = unpopulate(val, "SavePointName", &f.SavePointName) delete(rawMsg, key) @@ -1811,8 +2589,10 @@ func (f *FlinkJobProperties) UnmarshalJSON(data []byte) error { func (f FlinkProfile) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "catalogOptions", f.CatalogOptions) + populate(objectMap, "deploymentMode", f.DeploymentMode) populate(objectMap, "historyServer", f.HistoryServer) populate(objectMap, "jobManager", f.JobManager) + populate(objectMap, "jobSpec", f.JobSpec) populate(objectMap, "numReplicas", f.NumReplicas) populate(objectMap, "storage", f.Storage) populate(objectMap, "taskManager", f.TaskManager) @@ -1831,12 +2611,18 @@ func (f *FlinkProfile) UnmarshalJSON(data []byte) error { case "catalogOptions": err = unpopulate(val, "CatalogOptions", &f.CatalogOptions) delete(rawMsg, key) + case "deploymentMode": + err = unpopulate(val, "DeploymentMode", &f.DeploymentMode) + delete(rawMsg, key) case "historyServer": err = unpopulate(val, "HistoryServer", &f.HistoryServer) delete(rawMsg, key) case "jobManager": err = unpopulate(val, "JobManager", &f.JobManager) delete(rawMsg, key) + case "jobSpec": + err = unpopulate(val, "JobSpec", &f.JobSpec) + delete(rawMsg, key) case 
"numReplicas": err = unpopulate(val, "NumReplicas", &f.NumReplicas) delete(rawMsg, key) @@ -1889,6 +2675,7 @@ func (f *FlinkStorageProfile) UnmarshalJSON(data []byte) error { func (h HiveCatalogOption) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "catalogName", h.CatalogName) + populate(objectMap, "metastoreDbConnectionAuthenticationMode", h.MetastoreDbConnectionAuthenticationMode) populate(objectMap, "metastoreDbConnectionPasswordSecret", h.MetastoreDbConnectionPasswordSecret) populate(objectMap, "metastoreDbConnectionURL", h.MetastoreDbConnectionURL) populate(objectMap, "metastoreDbConnectionUserName", h.MetastoreDbConnectionUserName) @@ -1908,6 +2695,9 @@ func (h *HiveCatalogOption) UnmarshalJSON(data []byte) error { case "catalogName": err = unpopulate(val, "CatalogName", &h.CatalogName) delete(rawMsg, key) + case "metastoreDbConnectionAuthenticationMode": + err = unpopulate(val, "MetastoreDbConnectionAuthenticationMode", &h.MetastoreDbConnectionAuthenticationMode) + delete(rawMsg, key) case "metastoreDbConnectionPasswordSecret": err = unpopulate(val, "MetastoreDbConnectionPasswordSecret", &h.MetastoreDbConnectionPasswordSecret) delete(rawMsg, key) @@ -1963,6 +2753,84 @@ func (i *IdentityProfile) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type KafkaConnectivityEndpoints. +func (k KafkaConnectivityEndpoints) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "bootstrapServerEndpoint", k.BootstrapServerEndpoint) + populate(objectMap, "brokerEndpoints", k.BrokerEndpoints) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KafkaConnectivityEndpoints. 
+func (k *KafkaConnectivityEndpoints) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "bootstrapServerEndpoint": + err = unpopulate(val, "BootstrapServerEndpoint", &k.BootstrapServerEndpoint) + delete(rawMsg, key) + case "brokerEndpoints": + err = unpopulate(val, "BrokerEndpoints", &k.BrokerEndpoints) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type KafkaProfile. +func (k KafkaProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "clusterIdentity", k.ClusterIdentity) + populate(objectMap, "connectivityEndpoints", k.ConnectivityEndpoints) + populate(objectMap, "diskStorage", k.DiskStorage) + populate(objectMap, "enableKRaft", k.EnableKRaft) + populate(objectMap, "enablePublicEndpoints", k.EnablePublicEndpoints) + populate(objectMap, "remoteStorageUri", k.RemoteStorageURI) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type KafkaProfile. 
+func (k *KafkaProfile) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "clusterIdentity": + err = unpopulate(val, "ClusterIdentity", &k.ClusterIdentity) + delete(rawMsg, key) + case "connectivityEndpoints": + err = unpopulate(val, "ConnectivityEndpoints", &k.ConnectivityEndpoints) + delete(rawMsg, key) + case "diskStorage": + err = unpopulate(val, "DiskStorage", &k.DiskStorage) + delete(rawMsg, key) + case "enableKRaft": + err = unpopulate(val, "EnableKRaft", &k.EnableKRaft) + delete(rawMsg, key) + case "enablePublicEndpoints": + err = unpopulate(val, "EnablePublicEndpoints", &k.EnablePublicEndpoints) + delete(rawMsg, key) + case "remoteStorageUri": + err = unpopulate(val, "RemoteStorageURI", &k.RemoteStorageURI) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", k, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type LoadBasedConfig. func (l LoadBasedConfig) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) @@ -2220,10 +3088,186 @@ func (o *OperationListResult) UnmarshalJSON(data []byte) error { return nil } +// MarshalJSON implements the json.Marshaller interface for type RangerAdminSpec. +func (r RangerAdminSpec) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "admins", r.Admins) + populate(objectMap, "database", r.Database) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type RangerAdminSpec. 
+func (r *RangerAdminSpec) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "admins": + err = unpopulate(val, "Admins", &r.Admins) + delete(rawMsg, key) + case "database": + err = unpopulate(val, "Database", &r.Database) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type RangerAdminSpecDatabase. +func (r RangerAdminSpecDatabase) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "host", r.Host) + populate(objectMap, "name", r.Name) + populate(objectMap, "passwordSecretRef", r.PasswordSecretRef) + populate(objectMap, "username", r.Username) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type RangerAdminSpecDatabase. +func (r *RangerAdminSpecDatabase) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "host": + err = unpopulate(val, "Host", &r.Host) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &r.Name) + delete(rawMsg, key) + case "passwordSecretRef": + err = unpopulate(val, "PasswordSecretRef", &r.PasswordSecretRef) + delete(rawMsg, key) + case "username": + err = unpopulate(val, "Username", &r.Username) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type RangerAuditSpec. 
+func (r RangerAuditSpec) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "storageAccount", r.StorageAccount) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type RangerAuditSpec. +func (r *RangerAuditSpec) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "storageAccount": + err = unpopulate(val, "StorageAccount", &r.StorageAccount) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type RangerProfile. +func (r RangerProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "rangerAdmin", r.RangerAdmin) + populate(objectMap, "rangerAudit", r.RangerAudit) + populate(objectMap, "rangerUsersync", r.RangerUsersync) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type RangerProfile. 
+func (r *RangerProfile) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "rangerAdmin": + err = unpopulate(val, "RangerAdmin", &r.RangerAdmin) + delete(rawMsg, key) + case "rangerAudit": + err = unpopulate(val, "RangerAudit", &r.RangerAudit) + delete(rawMsg, key) + case "rangerUsersync": + err = unpopulate(val, "RangerUsersync", &r.RangerUsersync) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type RangerUsersyncSpec. +func (r RangerUsersyncSpec) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "enabled", r.Enabled) + populate(objectMap, "groups", r.Groups) + populate(objectMap, "mode", r.Mode) + populate(objectMap, "userMappingLocation", r.UserMappingLocation) + populate(objectMap, "users", r.Users) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type RangerUsersyncSpec. 
+func (r *RangerUsersyncSpec) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "enabled": + err = unpopulate(val, "Enabled", &r.Enabled) + delete(rawMsg, key) + case "groups": + err = unpopulate(val, "Groups", &r.Groups) + delete(rawMsg, key) + case "mode": + err = unpopulate(val, "Mode", &r.Mode) + delete(rawMsg, key) + case "userMappingLocation": + err = unpopulate(val, "UserMappingLocation", &r.UserMappingLocation) + delete(rawMsg, key) + case "users": + err = unpopulate(val, "Users", &r.Users) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + } + return nil +} + // MarshalJSON implements the json.Marshaller interface for type SSHConnectivityEndpoint. func (s SSHConnectivityEndpoint) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "endpoint", s.Endpoint) + populate(objectMap, "privateSshEndpoint", s.PrivateSSHEndpoint) return json.Marshal(objectMap) } @@ -2239,6 +3283,9 @@ func (s *SSHConnectivityEndpoint) UnmarshalJSON(data []byte) error { case "endpoint": err = unpopulate(val, "Endpoint", &s.Endpoint) delete(rawMsg, key) + case "privateSshEndpoint": + err = unpopulate(val, "PrivateSSHEndpoint", &s.PrivateSSHEndpoint) + delete(rawMsg, key) } if err != nil { return fmt.Errorf("unmarshalling type %T: %v", s, err) @@ -2694,6 +3741,7 @@ func (s *ServiceStatus) UnmarshalJSON(data []byte) error { // MarshalJSON implements the json.Marshaller interface for type SparkMetastoreSpec. 
func (s SparkMetastoreSpec) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) + populate(objectMap, "dbConnectionAuthenticationMode", s.DbConnectionAuthenticationMode) populate(objectMap, "dbName", s.DbName) populate(objectMap, "dbPasswordSecretName", s.DbPasswordSecretName) populate(objectMap, "dbServerHost", s.DbServerHost) @@ -2712,6 +3760,9 @@ func (s *SparkMetastoreSpec) UnmarshalJSON(data []byte) error { for key, val := range rawMsg { var err error switch key { + case "dbConnectionAuthenticationMode": + err = unpopulate(val, "DbConnectionAuthenticationMode", &s.DbConnectionAuthenticationMode) + delete(rawMsg, key) case "dbName": err = unpopulate(val, "DbName", &s.DbName) delete(rawMsg, key) @@ -3172,6 +4223,8 @@ func (u UpdatableClusterProfile) MarshalJSON() ([]byte, error) { populate(objectMap, "autoscaleProfile", u.AutoscaleProfile) populate(objectMap, "logAnalyticsProfile", u.LogAnalyticsProfile) populate(objectMap, "prometheusProfile", u.PrometheusProfile) + populate(objectMap, "rangerPluginProfile", u.RangerPluginProfile) + populate(objectMap, "rangerProfile", u.RangerProfile) populate(objectMap, "sshProfile", u.SSHProfile) populate(objectMap, "scriptActionProfiles", u.ScriptActionProfiles) populate(objectMap, "serviceConfigsProfiles", u.ServiceConfigsProfiles) @@ -3199,6 +4252,12 @@ func (u *UpdatableClusterProfile) UnmarshalJSON(data []byte) error { case "prometheusProfile": err = unpopulate(val, "PrometheusProfile", &u.PrometheusProfile) delete(rawMsg, key) + case "rangerPluginProfile": + err = unpopulate(val, "RangerPluginProfile", &u.RangerPluginProfile) + delete(rawMsg, key) + case "rangerProfile": + err = unpopulate(val, "RangerProfile", &u.RangerProfile) + delete(rawMsg, key) case "sshProfile": err = unpopulate(val, "SSHProfile", &u.SSHProfile) delete(rawMsg, key) @@ -3227,7 +4286,7 @@ func populate(m map[string]any, k string, v any) { } func unpopulate(data json.RawMessage, fn string, v any) error { - if data == nil { + if data 
== nil || string(data) == "null" { return nil } if err := json.Unmarshal(data, v); err != nil { diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/operations_client.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/operations_client.go index a40d7e9da211..041f889ad372 100644 --- a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/operations_client.go +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/operations_client.go @@ -39,7 +39,7 @@ func NewOperationsClient(credential azcore.TokenCredential, options *arm.ClientO // NewListPager - Returns list of operations. // -// Generated from API version 2023-06-01-preview +// Generated from API version 2023-11-01-preview // - options - OperationsClientListOptions contains the optional parameters for the OperationsClient.NewListPager method. func (client *OperationsClient) NewListPager(options *OperationsClientListOptions) *runtime.Pager[OperationsClientListResponse] { return runtime.NewPager(runtime.PagingHandler[OperationsClientListResponse]{ @@ -72,7 +72,7 @@ func (client *OperationsClient) listCreateRequest(ctx context.Context, options * return nil, err } reqQP := req.Raw().URL.Query() - reqQP.Set("api-version", "2023-06-01-preview") + reqQP.Set("api-version", "2023-11-01-preview") req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().Header["Accept"] = []string{"application/json"} return req, nil diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/operations_client_example_test.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/operations_client_example_test.go index dc628bd2c9d3..6adbbd8663f8 100644 --- a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/operations_client_example_test.go +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/operations_client_example_test.go @@ -17,7 +17,7 @@ import ( 
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers" ) -// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/7f70e351393addbc31d790a908c994c7c8644d9c/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-06-01-preview/examples/GetOperations.json +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/92de53a5f1e0e03c94b40475d2135d97148ed014/specification/hdinsight/resource-manager/Microsoft.HDInsight/HDInsightOnAks/preview/2023-11-01-preview/examples/GetOperations.json func ExampleOperationsClient_NewListPager() { cred, err := azidentity.NewDefaultAzureCredential(nil) if err != nil { diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/options.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/options.go index fe5856aab0d4..8072ad8a314e 100644 --- a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/options.go +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/options.go @@ -20,6 +20,12 @@ type AvailableClusterVersionsClientListByLocationOptions struct { // placeholder for future optional parameters } +// ClusterAvailableUpgradesClientListOptions contains the optional parameters for the ClusterAvailableUpgradesClient.NewListPager +// method. +type ClusterAvailableUpgradesClientListOptions struct { + // placeholder for future optional parameters +} + // ClusterJobsClientBeginRunJobOptions contains the optional parameters for the ClusterJobsClient.BeginRunJob method. type ClusterJobsClientBeginRunJobOptions struct { // Resumes the LRO from the provided token. @@ -28,6 +34,14 @@ type ClusterJobsClientBeginRunJobOptions struct { // ClusterJobsClientListOptions contains the optional parameters for the ClusterJobsClient.NewListPager method. 
type ClusterJobsClientListOptions struct { + // The system query option to filter job returned in the response. Allowed value is 'jobName eq {jobName}' or 'jarName eq + // {jarName}'. + Filter *string +} + +// ClusterPoolAvailableUpgradesClientListOptions contains the optional parameters for the ClusterPoolAvailableUpgradesClient.NewListPager +// method. +type ClusterPoolAvailableUpgradesClientListOptions struct { // placeholder for future optional parameters } @@ -50,6 +64,12 @@ type ClusterPoolsClientBeginUpdateTagsOptions struct { ResumeToken string } +// ClusterPoolsClientBeginUpgradeOptions contains the optional parameters for the ClusterPoolsClient.BeginUpgrade method. +type ClusterPoolsClientBeginUpgradeOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + // ClusterPoolsClientGetOptions contains the optional parameters for the ClusterPoolsClient.Get method. type ClusterPoolsClientGetOptions struct { // placeholder for future optional parameters @@ -91,6 +111,12 @@ type ClustersClientBeginUpdateOptions struct { ResumeToken string } +// ClustersClientBeginUpgradeOptions contains the optional parameters for the ClustersClient.BeginUpgrade method. +type ClustersClientBeginUpgradeOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + // ClustersClientGetInstanceViewOptions contains the optional parameters for the ClustersClient.GetInstanceView method. 
type ClustersClientGetInstanceViewOptions struct { // placeholder for future optional parameters diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/polymorphic_helpers.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/polymorphic_helpers.go index 25e2ed3bbbe9..8fb684aed239 100644 --- a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/polymorphic_helpers.go +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/polymorphic_helpers.go @@ -10,8 +10,31 @@ package armhdinsightcontainers import "encoding/json" +func unmarshalClusterAvailableUpgradePropertiesClassification(rawMsg json.RawMessage) (ClusterAvailableUpgradePropertiesClassification, error) { + if rawMsg == nil || string(rawMsg) == "null" { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b ClusterAvailableUpgradePropertiesClassification + switch m["upgradeType"] { + case string(ClusterAvailableUpgradeTypeAKSPatchUpgrade): + b = &ClusterAvailableUpgradeAksPatchUpgradeProperties{} + case string(ClusterAvailableUpgradeTypeHotfixUpgrade): + b = &ClusterAvailableUpgradeHotfixUpgradeProperties{} + default: + b = &ClusterAvailableUpgradeProperties{} + } + if err := json.Unmarshal(rawMsg, b); err != nil { + return nil, err + } + return b, nil +} + func unmarshalClusterJobPropertiesClassification(rawMsg json.RawMessage) (ClusterJobPropertiesClassification, error) { - if rawMsg == nil { + if rawMsg == nil || string(rawMsg) == "null" { return nil, nil } var m map[string]any @@ -30,3 +53,72 @@ func unmarshalClusterJobPropertiesClassification(rawMsg json.RawMessage) (Cluste } return b, nil } + +func unmarshalClusterPoolAvailableUpgradePropertiesClassification(rawMsg json.RawMessage) (ClusterPoolAvailableUpgradePropertiesClassification, error) { + if rawMsg == nil || string(rawMsg) == "null" { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); 
err != nil { + return nil, err + } + var b ClusterPoolAvailableUpgradePropertiesClassification + switch m["upgradeType"] { + case string(ClusterPoolAvailableUpgradeTypeAKSPatchUpgrade): + b = &ClusterPoolAvailableUpgradeAksPatchUpgradeProperties{} + case string(ClusterPoolAvailableUpgradeTypeNodeOsUpgrade): + b = &ClusterPoolAvailableUpgradeNodeOsUpgradeProperties{} + default: + b = &ClusterPoolAvailableUpgradeProperties{} + } + if err := json.Unmarshal(rawMsg, b); err != nil { + return nil, err + } + return b, nil +} + +func unmarshalClusterPoolUpgradePropertiesClassification(rawMsg json.RawMessage) (ClusterPoolUpgradePropertiesClassification, error) { + if rawMsg == nil || string(rawMsg) == "null" { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b ClusterPoolUpgradePropertiesClassification + switch m["upgradeType"] { + case string(ClusterPoolUpgradeTypeAKSPatchUpgrade): + b = &ClusterPoolAKSPatchVersionUpgradeProperties{} + case string(ClusterPoolUpgradeTypeNodeOsUpgrade): + b = &ClusterPoolNodeOsImageUpdateProperties{} + default: + b = &ClusterPoolUpgradeProperties{} + } + if err := json.Unmarshal(rawMsg, b); err != nil { + return nil, err + } + return b, nil +} + +func unmarshalClusterUpgradePropertiesClassification(rawMsg json.RawMessage) (ClusterUpgradePropertiesClassification, error) { + if rawMsg == nil || string(rawMsg) == "null" { + return nil, nil + } + var m map[string]any + if err := json.Unmarshal(rawMsg, &m); err != nil { + return nil, err + } + var b ClusterUpgradePropertiesClassification + switch m["upgradeType"] { + case string(ClusterUpgradeTypeAKSPatchUpgrade): + b = &ClusterAKSPatchVersionUpgradeProperties{} + case string(ClusterUpgradeTypeHotfixUpgrade): + b = &ClusterHotfixUpgradeProperties{} + default: + b = &ClusterUpgradeProperties{} + } + if err := json.Unmarshal(rawMsg, b); err != nil { + return nil, err + } + return b, nil +} diff --git 
a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/response_types.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/responses.go similarity index 84% rename from sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/response_types.go rename to sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/responses.go index ad7ec4bb4b0c..8991f476c794 100644 --- a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/response_types.go +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/responses.go @@ -20,6 +20,12 @@ type AvailableClusterVersionsClientListByLocationResponse struct { ClusterVersionsListResult } +// ClusterAvailableUpgradesClientListResponse contains the response from method ClusterAvailableUpgradesClient.NewListPager. +type ClusterAvailableUpgradesClientListResponse struct { + // Collection of cluster available upgrade. + ClusterAvailableUpgradeList +} + // ClusterJobsClientListResponse contains the response from method ClusterJobsClient.NewListPager. type ClusterJobsClientListResponse struct { // Collection of cluster job. @@ -32,6 +38,12 @@ type ClusterJobsClientRunJobResponse struct { ClusterJob } +// ClusterPoolAvailableUpgradesClientListResponse contains the response from method ClusterPoolAvailableUpgradesClient.NewListPager. +type ClusterPoolAvailableUpgradesClientListResponse struct { + // collection of cluster pool available upgrade. + ClusterPoolAvailableUpgradeList +} + // ClusterPoolsClientCreateOrUpdateResponse contains the response from method ClusterPoolsClient.BeginCreateOrUpdate. type ClusterPoolsClientCreateOrUpdateResponse struct { // Cluster pool. @@ -67,6 +79,12 @@ type ClusterPoolsClientUpdateTagsResponse struct { ClusterPool } +// ClusterPoolsClientUpgradeResponse contains the response from method ClusterPoolsClient.BeginUpgrade. +type ClusterPoolsClientUpgradeResponse struct { + // Cluster pool. 
+ ClusterPool +} + // ClustersClientCreateResponse contains the response from method ClustersClient.BeginCreate. type ClustersClientCreateResponse struct { // The cluster. @@ -120,6 +138,12 @@ type ClustersClientUpdateResponse struct { Cluster } +// ClustersClientUpgradeResponse contains the response from method ClustersClient.BeginUpgrade. +type ClustersClientUpgradeResponse struct { + // The cluster. + Cluster +} + // LocationsClientCheckNameAvailabilityResponse contains the response from method LocationsClient.CheckNameAvailability. type LocationsClientCheckNameAvailabilityResponse struct { // Result of check name availability. diff --git a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/time_rfc3339.go b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/time_rfc3339.go index 2f72f6993b57..b9dac4761a7b 100644 --- a/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/time_rfc3339.go +++ b/sdk/resourcemanager/hdinsightcontainers/armhdinsightcontainers/time_rfc3339.go @@ -19,12 +19,16 @@ import ( ) // Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. 
-var tzOffsetRegex = regexp.MustCompile(`(Z|z|\+|-)(\d+:\d+)*"*$`) +var tzOffsetRegex = regexp.MustCompile(`(?:Z|z|\+|-)(?:\d+:\d+)*"*$`) const ( - utcDateTimeJSON = `"2006-01-02T15:04:05.999999999"` - utcDateTime = "2006-01-02T15:04:05.999999999" - dateTimeJSON = `"` + time.RFC3339Nano + `"` + utcDateTime = "2006-01-02T15:04:05.999999999" + utcDateTimeJSON = `"` + utcDateTime + `"` + utcDateTimeNoT = "2006-01-02 15:04:05.999999999" + utcDateTimeJSONNoT = `"` + utcDateTimeNoT + `"` + dateTimeNoT = `2006-01-02 15:04:05.999999999Z07:00` + dateTimeJSON = `"` + time.RFC3339Nano + `"` + dateTimeJSONNoT = `"` + dateTimeNoT + `"` ) type dateTimeRFC3339 time.Time @@ -40,17 +44,33 @@ func (t dateTimeRFC3339) MarshalText() ([]byte, error) { } func (t *dateTimeRFC3339) UnmarshalJSON(data []byte) error { - layout := utcDateTimeJSON - if tzOffsetRegex.Match(data) { + tzOffset := tzOffsetRegex.Match(data) + hasT := strings.Contains(string(data), "T") || strings.Contains(string(data), "t") + var layout string + if tzOffset && hasT { layout = dateTimeJSON + } else if tzOffset { + layout = dateTimeJSONNoT + } else if hasT { + layout = utcDateTimeJSON + } else { + layout = utcDateTimeJSONNoT } return t.Parse(layout, string(data)) } func (t *dateTimeRFC3339) UnmarshalText(data []byte) error { - layout := utcDateTime - if tzOffsetRegex.Match(data) { + tzOffset := tzOffsetRegex.Match(data) + hasT := strings.Contains(string(data), "T") || strings.Contains(string(data), "t") + var layout string + if tzOffset && hasT { layout = time.RFC3339Nano + } else if tzOffset { + layout = dateTimeNoT + } else if hasT { + layout = utcDateTime + } else { + layout = utcDateTimeNoT } return t.Parse(layout, string(data)) } @@ -61,6 +81,10 @@ func (t *dateTimeRFC3339) Parse(layout, value string) error { return err } +func (t dateTimeRFC3339) String() string { + return time.Time(t).Format(time.RFC3339Nano) +} + func populateDateTimeRFC3339(m map[string]any, k string, t *time.Time) { if t == nil { return @@ 
-74,7 +98,7 @@ func populateDateTimeRFC3339(m map[string]any, k string, t *time.Time) { } func unpopulateDateTimeRFC3339(data json.RawMessage, fn string, t **time.Time) error { - if data == nil || strings.EqualFold(string(data), "null") { + if data == nil || string(data) == "null" { return nil } var aux dateTimeRFC3339