diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index e21c2f8ad4c0d..9a4583d0a6c59 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -159,6 +159,9 @@
 # PRLabel: %Batch
 /sdk/batch/ @wiboris @dpwatrous
 
+# PRLabel: %Batch
+/sdk/batch/Azure.Compute.Batch @wiboris @dpwatrous
+
 # ServiceLabel: %Batch
 # ServiceOwners: @wiboris @dpwatrous
 
diff --git a/.vscode/cspell.json b/.vscode/cspell.json
index ee62decd69de8..5740fc956b714 100644
--- a/.vscode/cspell.json
+++ b/.vscode/cspell.json
@@ -217,7 +217,12 @@
     {
       "filename": "**/sdk/batch/**/*.cs",
       "words": [
-        "cifs"
+        "cifs",
+        "ocpdate",
+        "Reimage",
+        "Reimaging",
+        "reimaging",
+        "SSDLRS"
       ]
     },
     {
diff --git a/sdk/batch/Azure.Compute.Batch/Azure.Compute.Batch.sln b/sdk/batch/Azure.Compute.Batch/Azure.Compute.Batch.sln
new file mode 100644
index 0000000000000..55c0addd0d71a
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/Azure.Compute.Batch.sln
@@ -0,0 +1,36 @@
+Microsoft Visual Studio Solution File, Format Version 12.00
+# Visual Studio Version 16
+VisualStudioVersion = 16.0.29709.97
+MinimumVisualStudioVersion = 10.0.40219.1
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Azure.Core.TestFramework", "..\..\core\Azure.Core.TestFramework\src\Azure.Core.TestFramework.csproj", "{ECC730C1-4AEA-420C-916A-66B19B79E4DC}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Azure.Compute.Batch", "src\Azure.Compute.Batch.csproj", "{28FF4005-4467-4E36-92E7-DEA27DEB1519}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Azure.Compute.Batch.Tests", "tests\Azure.Compute.Batch.Tests.csproj", "{1F1CD1D4-9932-4B73-99D8-C252A67D4B46}"
+EndProject
+Global
+	GlobalSection(SolutionConfigurationPlatforms) = preSolution
+		Debug|Any CPU = Debug|Any CPU
+		Release|Any CPU = Release|Any CPU
+	EndGlobalSection
+	GlobalSection(ProjectConfigurationPlatforms) = postSolution
+		{ECC730C1-4AEA-420C-916A-66B19B79E4DC}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+		{ECC730C1-4AEA-420C-916A-66B19B79E4DC}.Debug|Any CPU.Build.0 = Debug|Any CPU
+		{ECC730C1-4AEA-420C-916A-66B19B79E4DC}.Release|Any CPU.ActiveCfg = Release|Any CPU
+		{ECC730C1-4AEA-420C-916A-66B19B79E4DC}.Release|Any CPU.Build.0 = Release|Any CPU
+		{28FF4005-4467-4E36-92E7-DEA27DEB1519}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+		{28FF4005-4467-4E36-92E7-DEA27DEB1519}.Debug|Any CPU.Build.0 = Debug|Any CPU
+		{28FF4005-4467-4E36-92E7-DEA27DEB1519}.Release|Any CPU.ActiveCfg = Release|Any CPU
+		{28FF4005-4467-4E36-92E7-DEA27DEB1519}.Release|Any CPU.Build.0 = Release|Any CPU
+		{1F1CD1D4-9932-4B73-99D8-C252A67D4B46}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+		{1F1CD1D4-9932-4B73-99D8-C252A67D4B46}.Debug|Any CPU.Build.0 = Debug|Any CPU
+		{1F1CD1D4-9932-4B73-99D8-C252A67D4B46}.Release|Any CPU.ActiveCfg = Release|Any CPU
+		{1F1CD1D4-9932-4B73-99D8-C252A67D4B46}.Release|Any CPU.Build.0 = Release|Any CPU
+	EndGlobalSection
+	GlobalSection(SolutionProperties) = preSolution
+		HideSolutionNode = FALSE
+	EndGlobalSection
+	GlobalSection(ExtensibilityGlobals) = postSolution
+		SolutionGuid = {A97F4B90-2591-4689-B1F8-5F21FE6D6CAE}
+	EndGlobalSection
+EndGlobal
diff --git a/sdk/batch/Azure.Compute.Batch/CHANGELOG.md b/sdk/batch/Azure.Compute.Batch/CHANGELOG.md
new file mode 100644
index 0000000000000..3bcced74276dd
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/CHANGELOG.md
@@ -0,0 +1,19 @@
+# Release History
+
+## 1.0.0-beta.1 (2024-06-01)
+
+### Breaking Changes
+
+Initial preview of the new track 2 design of this library.
+
+### Package Name
+
+The package name has been changed from `Microsoft.Azure.Batch` to `Azure.Compute.Batch`.
+
+### General New Features
+
+This package follows the [new Azure SDK guidelines](https://azure.github.io/azure-sdk/general_introduction.html) and provides many core capabilities.
+
+This package is a Public Preview version, so expect incompatible changes in subsequent releases as we improve the product. To provide feedback, submit an issue in our [Azure SDK for .NET GitHub repo](https://github.com/Azure/azure-sdk-for-net/issues).
+
+> NOTE: For more information about unified authentication, please refer to [Microsoft Azure Identity documentation for .NET](https://docs.microsoft.com/dotnet/api/overview/azure/identity-readme?view=azure-dotnet).
diff --git a/sdk/batch/Azure.Compute.Batch/Directory.Build.props b/sdk/batch/Azure.Compute.Batch/Directory.Build.props
new file mode 100644
index 0000000000000..63bd836ad44b7
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/Directory.Build.props
@@ -0,0 +1,6 @@
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <!--
+    Add any shared properties you wish for the projects under this package directory that need to be set before the auto imported Directory.Build.props
+  -->
+  <Import Project="$([MSBuild]::GetPathOfFileAbove(Directory.Build.props), $(MSBuildThisFileDirectory)..))" />
+</Project>
diff --git a/sdk/batch/Azure.Compute.Batch/README.md b/sdk/batch/Azure.Compute.Batch/README.md
new file mode 100644
index 0000000000000..73d7751649cda
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/README.md
@@ -0,0 +1,108 @@
+# Azure Compute Batch client library for .NET
+
+Azure Batch lets you run and scale large numbers of parallel and high-performance computing (HPC) jobs in Azure. Azure.Compute.Batch is the track 2 .NET client library for the Azure Batch service.
+
+Use the client library to:
+
+* Create and manage Batch pools of compute nodes
+* Submit and manage the jobs and tasks that run on those pools
+
+[Source code](https://github.com/Azure/azure-sdk-for-net/blob/main/sdk/batch/Azure.Compute.Batch/src) | [Package (NuGet)](https://www.nuget.org/packages) | [API reference documentation](https://azure.github.io/azure-sdk-for-net) | [Product documentation](https://docs.microsoft.com/azure)
+
+## Getting started
+
+### Install the package
+
+Install the client library for .NET with [NuGet](https://www.nuget.org/):
+
+```dotnetcli
+dotnet add package Azure.Compute.Batch --prerelease
+```
+
+### Prerequisites
+
+You must have an [Azure subscription](https://azure.microsoft.com/free/dotnet/) and an [Azure Batch account](https://docs.microsoft.com/azure/batch/accounts) before you can authenticate a client and run the snippets in the [Examples](#examples) section.
+
+### Authenticate the client
+
+`BatchClient` accepts the Batch account endpoint together with either a `TokenCredential` (Microsoft Entra ID) or an `AzureNamedKeyCredential` (account shared key), matching the constructors in the API surface below.
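+
+The following minimal sketch shows both options; the endpoint, account name, and key values are placeholders you must replace with your own:
+
+```csharp
+using Azure;
+using Azure.Compute.Batch;
+using Azure.Identity;
+
+// Authenticate with Microsoft Entra ID (recommended).
+BatchClient batchClient = new BatchClient(
+    new Uri("https://<your-account>.<region>.batch.azure.com"),
+    new DefaultAzureCredential());
+
+// Or authenticate with the Batch account's shared key.
+BatchClient sharedKeyClient = new BatchClient(
+    new Uri("https://<your-account>.<region>.batch.azure.com"),
+    new AzureNamedKeyCredential("<account-name>", "<account-key>"));
+```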
+
+## Key concepts
+
+`BatchClient` is the single entry point to the service and exposes operations for the core Batch resources:
+
+* A **pool** is a collection of compute nodes on which your work runs.
+* A **job** manages a collection of tasks and is associated with a pool.
+* A **task** is a unit of work, a command line that Batch schedules onto a compute node in the pool.
+
+### Thread safety
+
+We guarantee that all client instance methods are thread-safe and independent of each other ([guideline](https://azure.github.io/azure-sdk/dotnet_introduction.html#dotnet-service-methods-thread-safety)). This ensures that the recommendation of reusing client instances is always safe, even across threads.
+
+### Additional concepts
+
+[Client options](https://github.com/Azure/azure-sdk-for-net/blob/main/sdk/core/Azure.Core/README.md#configuring-service-clients-using-clientoptions) |
+[Accessing the response](https://github.com/Azure/azure-sdk-for-net/blob/main/sdk/core/Azure.Core/README.md#accessing-http-response-details-using-responset) |
+[Long-running operations](https://github.com/Azure/azure-sdk-for-net/blob/main/sdk/core/Azure.Core/README.md#consuming-long-running-operations-using-operationt) |
+[Handling failures](https://github.com/Azure/azure-sdk-for-net/blob/main/sdk/core/Azure.Core/README.md#reporting-errors-requestfailedexception) |
+[Diagnostics](https://github.com/Azure/azure-sdk-for-net/blob/main/sdk/core/Azure.Core/samples/Diagnostics.md) |
+[Mocking](https://github.com/Azure/azure-sdk-for-net/blob/main/sdk/core/Azure.Core/README.md#mocking) |
+[Client lifetime](https://devblogs.microsoft.com/azure-sdk/lifetime-management-and-thread-safety-guarantees-of-azure-sdk-net-clients/)
+
+## Examples
+
+You can familiarize yourself with different APIs using [Samples](https://github.com/Azure/azure-sdk-for-net/tree/main/sdk/batch/Azure.Compute.Batch/).
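+
+As a minimal sketch, the example below creates a job on an existing pool and then adds a task to it. The IDs and pool name are placeholders, and the model constructor shapes follow this package's samples:
+
+```csharp
+using Azure.Compute.Batch;
+using Azure.Identity;
+
+BatchClient batchClient = new BatchClient(
+    new Uri("https://<your-account>.<region>.batch.azure.com"),
+    new DefaultAzureCredential());
+
+// Create a job that will run on the existing pool "poolName".
+BatchJobCreateContent job = new BatchJobCreateContent("jobId", new BatchPoolInfo { PoolId = "poolName" });
+batchClient.CreateJob(job);
+
+// Add a task to the job; the command line is what runs on a compute node.
+BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "echo Hello world");
+batchClient.CreateTask("jobId", task);
+```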
+
+## Troubleshooting
+
+Calls that fail on the service throw a `RequestFailedException` carrying the HTTP status code together with the Batch error code and message; catch it to inspect errors and to handle conditions such as throttling gracefully. See [Handling failures](https://github.com/Azure/azure-sdk-for-net/blob/main/sdk/core/Azure.Core/README.md#reporting-errors-requestfailedexception) for the general pattern and [Diagnostics](https://github.com/Azure/azure-sdk-for-net/blob/main/sdk/core/Azure.Core/samples/Diagnostics.md) for enabling logging while you debug.
+
+## Next steps
+
+* Browse the [samples](https://github.com/Azure/azure-sdk-for-net/tree/main/sdk/batch/Azure.Compute.Batch/) that sit alongside this README.
+* If you are looking for the previous-generation, track 1 library, see [Microsoft.Azure.Batch](https://www.nuget.org/packages/Microsoft.Azure.Batch/).
+
+## Contributing
+
+This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit [cla.microsoft.com](https://cla.microsoft.com).
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information, see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
+
+![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-net/sdk/batch/Azure.Compute.Batch/README.png)
diff --git a/sdk/batch/Azure.Compute.Batch/api/Azure.Compute.Batch.netstandard2.0.cs b/sdk/batch/Azure.Compute.Batch/api/Azure.Compute.Batch.netstandard2.0.cs
new file mode 100644
index 0000000000000..1ad6b96f27c9c
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/api/Azure.Compute.Batch.netstandard2.0.cs
@@ -0,0 +1,3243 @@
+namespace Azure.Compute.Batch
+{
+    [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)]
+    public readonly partial struct AccessScope : System.IEquatable<Azure.Compute.Batch.AccessScope>
+    {
+        private readonly object _dummy;
+        private readonly int _dummyPrimitive;
+        public AccessScope(string value) { throw null; }
+        public static Azure.Compute.Batch.AccessScope Job { get { throw null; } }
+        public bool Equals(Azure.Compute.Batch.AccessScope other) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override bool Equals(object obj) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override int GetHashCode() { throw null; }
+        public static bool operator ==(Azure.Compute.Batch.AccessScope left, Azure.Compute.Batch.AccessScope right) { throw null; }
+        public static implicit operator Azure.Compute.Batch.AccessScope (string value) { throw null; }
+        public static bool operator !=(Azure.Compute.Batch.AccessScope left, Azure.Compute.Batch.AccessScope right) { throw null; }
+        public override string ToString() { throw null; }
+    }
+    public partial class AffinityInfo : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.AffinityInfo>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.AffinityInfo>
+    {
+        public AffinityInfo(string affinityId) { }
+        public string AffinityId { get { throw null; } set { } }
+        Azure.Compute.Batch.AffinityInfo System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.AffinityInfo>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.AffinityInfo>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.AffinityInfo System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.AffinityInfo>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.AffinityInfo>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.AffinityInfo>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)]
+    public readonly partial struct AllocationState : System.IEquatable<Azure.Compute.Batch.AllocationState>
+    {
+        private readonly object _dummy;
+        private readonly int _dummyPrimitive;
+        public AllocationState(string value) { throw null; }
+        public static Azure.Compute.Batch.AllocationState Resizing { get { throw null; } }
+        public static Azure.Compute.Batch.AllocationState Steady { get { throw null; } }
+        public static Azure.Compute.Batch.AllocationState Stopping { get { throw null; } }
+        public bool Equals(Azure.Compute.Batch.AllocationState other) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override bool Equals(object obj) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override int GetHashCode() { throw null; }
+        public static bool operator ==(Azure.Compute.Batch.AllocationState left, Azure.Compute.Batch.AllocationState right) { throw null; }
+        public static implicit operator Azure.Compute.Batch.AllocationState (string value) { throw null; }
+        public static bool operator !=(Azure.Compute.Batch.AllocationState left, Azure.Compute.Batch.AllocationState right) { throw null; }
+        public override string ToString() { throw null; }
+    }
+    public partial class AuthenticationTokenSettings : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.AuthenticationTokenSettings>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.AuthenticationTokenSettings>
+    {
+        public AuthenticationTokenSettings() { }
+        public System.Collections.Generic.IList<Azure.Compute.Batch.AccessScope> Access { get { throw null; } }
+        Azure.Compute.Batch.AuthenticationTokenSettings System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.AuthenticationTokenSettings>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.AuthenticationTokenSettings>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.AuthenticationTokenSettings System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.AuthenticationTokenSettings>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.AuthenticationTokenSettings>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.AuthenticationTokenSettings>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    public partial class AutomaticOsUpgradePolicy : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.AutomaticOsUpgradePolicy>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.AutomaticOsUpgradePolicy>
+    {
+        public AutomaticOsUpgradePolicy() { }
+        public bool? DisableAutomaticRollback { get { throw null; } set { } }
+        public bool? EnableAutomaticOsUpgrade { get { throw null; } set { } }
+        public bool? OsRollingUpgradeDeferral { get { throw null; } set { } }
+        public bool? UseRollingUpgradePolicy { get { throw null; } set { } }
+        Azure.Compute.Batch.AutomaticOsUpgradePolicy System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.AutomaticOsUpgradePolicy>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.AutomaticOsUpgradePolicy>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.AutomaticOsUpgradePolicy System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.AutomaticOsUpgradePolicy>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.AutomaticOsUpgradePolicy>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.AutomaticOsUpgradePolicy>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    public partial class AutoScaleRun : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.AutoScaleRun>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.AutoScaleRun>
+    {
+        internal AutoScaleRun() { }
+        public Azure.Compute.Batch.AutoScaleRunError Error { get { throw null; } }
+        public string Results { get { throw null; } }
+        public System.DateTimeOffset Timestamp { get { throw null; } }
+        Azure.Compute.Batch.AutoScaleRun System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.AutoScaleRun>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.AutoScaleRun>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.AutoScaleRun System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.AutoScaleRun>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.AutoScaleRun>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.AutoScaleRun>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    public partial class AutoScaleRunError : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.AutoScaleRunError>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.AutoScaleRunError>
+    {
+        internal AutoScaleRunError() { }
+        public string Code { get { throw null; } }
+        public string Message { get { throw null; } }
+        public System.Collections.Generic.IReadOnlyList<Azure.Compute.Batch.NameValuePair> Values { get { throw null; } }
+        Azure.Compute.Batch.AutoScaleRunError System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.AutoScaleRunError>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.AutoScaleRunError>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.AutoScaleRunError System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.AutoScaleRunError>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.AutoScaleRunError>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.AutoScaleRunError>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)]
+    public readonly partial struct AutoUserScope : System.IEquatable<Azure.Compute.Batch.AutoUserScope>
+    {
+        private readonly object _dummy;
+        private readonly int _dummyPrimitive;
+        public AutoUserScope(string value) { throw null; }
+        public static Azure.Compute.Batch.AutoUserScope Pool { get { throw null; } }
+        public static Azure.Compute.Batch.AutoUserScope Task { get { throw null; } }
+        public bool Equals(Azure.Compute.Batch.AutoUserScope other) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override bool Equals(object obj) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override int GetHashCode() { throw null; }
+        public static bool operator ==(Azure.Compute.Batch.AutoUserScope left, Azure.Compute.Batch.AutoUserScope right) { throw null; }
+        public static implicit operator Azure.Compute.Batch.AutoUserScope (string value) { throw null; }
+        public static bool operator !=(Azure.Compute.Batch.AutoUserScope left, Azure.Compute.Batch.AutoUserScope right) { throw null; }
+        public override string ToString() { throw null; }
+    }
+    public partial class AutoUserSpecification : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.AutoUserSpecification>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.AutoUserSpecification>
+    {
+        public AutoUserSpecification() { }
+        public Azure.Compute.Batch.ElevationLevel? ElevationLevel { get { throw null; } set { } }
+        public Azure.Compute.Batch.AutoUserScope? Scope { get { throw null; } set { } }
+        Azure.Compute.Batch.AutoUserSpecification System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.AutoUserSpecification>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.AutoUserSpecification>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.AutoUserSpecification System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.AutoUserSpecification>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.AutoUserSpecification>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.AutoUserSpecification>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    public partial class AzureBlobFileSystemConfiguration : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.AzureBlobFileSystemConfiguration>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.AzureBlobFileSystemConfiguration>
+    {
+        public AzureBlobFileSystemConfiguration(string accountName, string containerName, string relativeMountPath) { }
+        public string AccountKey { get { throw null; } set { } }
+        public string AccountName { get { throw null; } set { } }
+        public string BlobfuseOptions { get { throw null; } set { } }
+        public string ContainerName { get { throw null; } set { } }
+        public Azure.Compute.Batch.BatchNodeIdentityReference IdentityReference { get { throw null; } set { } }
+        public string RelativeMountPath { get { throw null; } set { } }
+        public string SasKey { get { throw null; } set { } }
+        Azure.Compute.Batch.AzureBlobFileSystemConfiguration System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.AzureBlobFileSystemConfiguration>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.AzureBlobFileSystemConfiguration>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.AzureBlobFileSystemConfiguration System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.AzureBlobFileSystemConfiguration>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.AzureBlobFileSystemConfiguration>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.AzureBlobFileSystemConfiguration>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    public partial class AzureFileShareConfiguration : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.AzureFileShareConfiguration>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.AzureFileShareConfiguration>
+    {
+        public AzureFileShareConfiguration(string accountName, string azureFileUrl, string accountKey, string relativeMountPath) { }
+        public string AccountKey { get { throw null; } set { } }
+        public string AccountName { get { throw null; } set { } }
+        public string AzureFileUrl { get { throw null; } set { } }
+        public string MountOptions { get { throw null; } set { } }
+        public string RelativeMountPath { get { throw null; } set { } }
+        Azure.Compute.Batch.AzureFileShareConfiguration System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.AzureFileShareConfiguration>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.AzureFileShareConfiguration>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.AzureFileShareConfiguration System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.AzureFileShareConfiguration>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.AzureFileShareConfiguration>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.AzureFileShareConfiguration>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    public partial class BatchApplication : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchApplication>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchApplication>
+    {
+        internal BatchApplication() { }
+        public string DisplayName { get { throw null; } }
+        public string Id { get { throw null; } }
+        public System.Collections.Generic.IReadOnlyList<string> Versions { get { throw null; } }
+        Azure.Compute.Batch.BatchApplication System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchApplication>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchApplication>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.BatchApplication System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchApplication>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchApplication>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchApplication>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    public partial class BatchApplicationPackageReference : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchApplicationPackageReference>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchApplicationPackageReference>
+    {
+        public BatchApplicationPackageReference(string applicationId) { }
+        public string ApplicationId { get { throw null; } set { } }
+        public string Version { get { throw null; } set { } }
+        Azure.Compute.Batch.BatchApplicationPackageReference System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchApplicationPackageReference>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchApplicationPackageReference>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.BatchApplicationPackageReference System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchApplicationPackageReference>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchApplicationPackageReference>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchApplicationPackageReference>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    public partial class BatchAutoPoolSpecification : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchAutoPoolSpecification>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchAutoPoolSpecification>
+    {
+        public BatchAutoPoolSpecification(Azure.Compute.Batch.BatchPoolLifetimeOption poolLifetimeOption) { }
+        public string AutoPoolIdPrefix { get { throw null; } set { } }
+        public bool? KeepAlive { get { throw null; } set { } }
+        public Azure.Compute.Batch.BatchPoolSpecification Pool { get { throw null; } set { } }
+        public Azure.Compute.Batch.BatchPoolLifetimeOption PoolLifetimeOption { get { throw null; } set { } }
+        Azure.Compute.Batch.BatchAutoPoolSpecification System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchAutoPoolSpecification>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchAutoPoolSpecification>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.BatchAutoPoolSpecification System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchAutoPoolSpecification>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchAutoPoolSpecification>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchAutoPoolSpecification>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    public partial class BatchClient
+    {
+        protected BatchClient() { }
+        public BatchClient(System.Uri endpoint, Azure.AzureNamedKeyCredential credential) { }
+        public BatchClient(System.Uri endpoint, Azure.AzureNamedKeyCredential credential, Azure.Compute.Batch.BatchClientOptions options) { }
+        public BatchClient(System.Uri endpoint, Azure.Core.TokenCredential credential) { }
+        public BatchClient(System.Uri endpoint, Azure.Core.TokenCredential credential, Azure.Compute.Batch.BatchClientOptions options) { }
+        public virtual Azure.Core.Pipeline.HttpPipeline Pipeline { get { throw null; } }
+        public virtual Azure.Response CreateJob(Azure.Compute.Batch.BatchJobCreateContent job, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual Azure.Response CreateJob(Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> CreateJobAsync(Azure.Compute.Batch.BatchJobCreateContent job, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> CreateJobAsync(Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; }
+        public virtual Azure.Response CreateJobSchedule(Azure.Compute.Batch.BatchJobScheduleCreateContent jobSchedule, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual Azure.Response CreateJobSchedule(Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> CreateJobScheduleAsync(Azure.Compute.Batch.BatchJobScheduleCreateContent jobSchedule, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> CreateJobScheduleAsync(Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; }
+        public virtual Azure.Response CreateNodeUser(string poolId, string nodeId, Azure.Compute.Batch.BatchNodeUserCreateContent user, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual Azure.Response CreateNodeUser(string poolId, string nodeId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> CreateNodeUserAsync(string poolId, string nodeId, Azure.Compute.Batch.BatchNodeUserCreateContent user, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> CreateNodeUserAsync(string poolId, string nodeId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; }
+        public virtual Azure.Response CreatePool(Azure.Compute.Batch.BatchPoolCreateContent pool, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual Azure.Response CreatePool(Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> CreatePoolAsync(Azure.Compute.Batch.BatchPoolCreateContent pool, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> CreatePoolAsync(Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; }
+        public virtual Azure.Response CreateTask(string jobId, Azure.Compute.Batch.BatchTaskCreateContent task, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual Azure.Response CreateTask(string jobId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> CreateTaskAsync(string jobId, Azure.Compute.Batch.BatchTaskCreateContent task, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> CreateTaskAsync(string jobId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; }
+        public virtual Azure.Response<Azure.Compute.Batch.BatchTaskAddCollectionResult> CreateTaskCollection(string jobId, Azure.Compute.Batch.BatchTaskGroup taskCollection, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual Azure.Response CreateTaskCollection(string jobId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response<Azure.Compute.Batch.BatchTaskAddCollectionResult>> CreateTaskCollectionAsync(string jobId, Azure.Compute.Batch.BatchTaskGroup taskCollection, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> CreateTaskCollectionAsync(string jobId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; }
+        public virtual Azure.Response DeleteJob(string jobId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> DeleteJobAsync(string jobId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; }
+        public virtual Azure.Response DeleteJobSchedule(string jobScheduleId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> DeleteJobScheduleAsync(string jobScheduleId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; }
+        public virtual Azure.Response DeleteNodeFile(string poolId, string nodeId, string filePath, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), bool? recursive = default(bool?), Azure.RequestContext context = null) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> DeleteNodeFileAsync(string poolId, string nodeId, string filePath, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), bool? recursive = default(bool?), Azure.RequestContext context = null) { throw null; }
+        public virtual Azure.Response DeleteNodeUser(string poolId, string nodeId, string userName, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> DeleteNodeUserAsync(string poolId, string nodeId, string userName, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; }
+        public virtual Azure.Response DeletePool(string poolId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> DeletePoolAsync(string poolId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; }
+        public virtual Azure.Response DeleteTask(string jobId, string taskId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> DeleteTaskAsync(string jobId, string taskId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; }
+        public virtual Azure.Response DeleteTaskFile(string jobId, string taskId, string filePath, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), bool? recursive = default(bool?), Azure.RequestContext context = null) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> DeleteTaskFileAsync(string jobId, string taskId, string filePath, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), bool? recursive = default(bool?), Azure.RequestContext context = null) { throw null; }
+        public virtual Azure.Response DisableJob(string jobId, Azure.Compute.Batch.BatchJobDisableContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual Azure.Response DisableJob(string jobId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> DisableJobAsync(string jobId, Azure.Compute.Batch.BatchJobDisableContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> DisableJobAsync(string jobId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; }
+        public virtual Azure.Response DisableJobSchedule(string jobScheduleId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> DisableJobScheduleAsync(string jobScheduleId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; }
+        public virtual Azure.Response DisableNodeScheduling(string poolId, string nodeId, Azure.Compute.Batch.BatchNodeDisableSchedulingContent parameters = null, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual Azure.Response DisableNodeScheduling(string poolId, string nodeId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> DisableNodeSchedulingAsync(string poolId, string nodeId, Azure.Compute.Batch.BatchNodeDisableSchedulingContent parameters = null, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> DisableNodeSchedulingAsync(string poolId, string nodeId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; }
+        public virtual Azure.Response DisablePoolAutoScale(string poolId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> DisablePoolAutoScaleAsync(string poolId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; }
+        public virtual Azure.Response EnableJob(string jobId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> EnableJobAsync(string jobId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; }
+        public virtual Azure.Response EnableJobSchedule(string jobScheduleId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> EnableJobScheduleAsync(string jobScheduleId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; }
+        public virtual Azure.Response EnableNodeScheduling(string poolId, string nodeId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> EnableNodeSchedulingAsync(string poolId, string nodeId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; }
+        public virtual Azure.Response EnablePoolAutoScale(string poolId, Azure.Compute.Batch.BatchPoolEnableAutoScaleContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual Azure.Response EnablePoolAutoScale(string poolId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> EnablePoolAutoScaleAsync(string poolId, Azure.Compute.Batch.BatchPoolEnableAutoScaleContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> EnablePoolAutoScaleAsync(string poolId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; }
+        public virtual Azure.Response<Azure.Compute.Batch.AutoScaleRun> EvaluatePoolAutoScale(string poolId, Azure.Compute.Batch.BatchPoolEvaluateAutoScaleContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual Azure.Response EvaluatePoolAutoScale(string poolId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response<Azure.Compute.Batch.AutoScaleRun>> EvaluatePoolAutoScaleAsync(string poolId, Azure.Compute.Batch.BatchPoolEvaluateAutoScaleContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> EvaluatePoolAutoScaleAsync(string poolId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; }
+        public virtual Azure.Response GetApplication(string applicationId, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, Azure.RequestContext context) { throw null; }
+        public virtual Azure.Response<Azure.Compute.Batch.BatchApplication> GetApplication(string applicationId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> GetApplicationAsync(string applicationId, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, Azure.RequestContext context) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response<Azure.Compute.Batch.BatchApplication>> GetApplicationAsync(string applicationId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual Azure.Pageable<System.BinaryData> GetApplications(int? timeOutInSeconds, System.DateTimeOffset? ocpdate, int? maxresults, Azure.RequestContext context) { throw null; }
+        public virtual Azure.Pageable<Azure.Compute.Batch.BatchApplication> GetApplications(int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), int? maxresults = default(int?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual Azure.AsyncPageable<System.BinaryData> GetApplicationsAsync(int? timeOutInSeconds, System.DateTimeOffset? ocpdate, int? maxresults, Azure.RequestContext context) { throw null; }
+        public virtual Azure.AsyncPageable<Azure.Compute.Batch.BatchApplication> GetApplicationsAsync(int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), int? maxresults = default(int?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual Azure.Response GetJob(string jobId, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, System.Collections.Generic.IEnumerable<string> select, System.Collections.Generic.IEnumerable<string> expand, Azure.RequestConditions requestConditions, Azure.RequestContext context) { throw null; }
+        public virtual Azure.Response<Azure.Compute.Batch.BatchJob> GetJob(string jobId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Collections.Generic.IEnumerable<string> select = null, System.Collections.Generic.IEnumerable<string> expand = null, Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> GetJobAsync(string jobId, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, System.Collections.Generic.IEnumerable<string> select, System.Collections.Generic.IEnumerable<string> expand, Azure.RequestConditions requestConditions, Azure.RequestContext context) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response<Azure.Compute.Batch.BatchJob>> GetJobAsync(string jobId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Collections.Generic.IEnumerable<string> select = null, System.Collections.Generic.IEnumerable<string> expand = null, Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual Azure.Pageable<System.BinaryData> GetJobPreparationAndReleaseTaskStatuses(string jobId, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, int? maxresults, string filter, System.Collections.Generic.IEnumerable<string> select, Azure.RequestContext context) { throw null; }
+        public virtual Azure.Pageable<Azure.Compute.Batch.BatchJobPreparationAndReleaseTaskStatus> GetJobPreparationAndReleaseTaskStatuses(string jobId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), int? maxresults = default(int?), string filter = null, System.Collections.Generic.IEnumerable<string> select = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual Azure.AsyncPageable<System.BinaryData> GetJobPreparationAndReleaseTaskStatusesAsync(string jobId, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, int? maxresults, string filter, System.Collections.Generic.IEnumerable<string> select, Azure.RequestContext context) { throw null; }
+        public virtual Azure.AsyncPageable<Azure.Compute.Batch.BatchJobPreparationAndReleaseTaskStatus> GetJobPreparationAndReleaseTaskStatusesAsync(string jobId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), int? maxresults = default(int?), string filter = null, System.Collections.Generic.IEnumerable<string> select = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual Azure.Pageable<System.BinaryData> GetJobs(int? timeOutInSeconds, System.DateTimeOffset? ocpdate, int? maxresults, string filter, System.Collections.Generic.IEnumerable<string> select, System.Collections.Generic.IEnumerable<string> expand, Azure.RequestContext context) { throw null; }
+        public virtual Azure.Pageable<Azure.Compute.Batch.BatchJob> GetJobs(int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), int? maxresults = default(int?), string filter = null, System.Collections.Generic.IEnumerable<string> select = null, System.Collections.Generic.IEnumerable<string> expand = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual Azure.AsyncPageable<System.BinaryData> GetJobsAsync(int? timeOutInSeconds, System.DateTimeOffset? ocpdate, int? maxresults, string filter, System.Collections.Generic.IEnumerable<string> select, System.Collections.Generic.IEnumerable<string> expand, Azure.RequestContext context) { throw null; }
+        public virtual Azure.AsyncPageable<Azure.Compute.Batch.BatchJob> GetJobsAsync(int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), int? maxresults = default(int?), string filter = null, System.Collections.Generic.IEnumerable<string> select = null, System.Collections.Generic.IEnumerable<string> expand = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual Azure.Response GetJobSchedule(string jobScheduleId, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, System.Collections.Generic.IEnumerable<string> select, System.Collections.Generic.IEnumerable<string> expand, Azure.RequestConditions requestConditions, Azure.RequestContext context) { throw null; }
+        public virtual Azure.Response<Azure.Compute.Batch.BatchJobSchedule> GetJobSchedule(string jobScheduleId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Collections.Generic.IEnumerable<string> select = null, System.Collections.Generic.IEnumerable<string> expand = null, Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> GetJobScheduleAsync(string jobScheduleId, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, System.Collections.Generic.IEnumerable<string> select, System.Collections.Generic.IEnumerable<string> expand, Azure.RequestConditions requestConditions, Azure.RequestContext context) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response<Azure.Compute.Batch.BatchJobSchedule>> GetJobScheduleAsync(string jobScheduleId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Collections.Generic.IEnumerable<string> select = null, System.Collections.Generic.IEnumerable<string> expand = null, Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual Azure.Pageable<System.BinaryData> GetJobSchedules(int? timeOutInSeconds, System.DateTimeOffset? ocpdate, int? maxresults, string filter, System.Collections.Generic.IEnumerable<string> select, System.Collections.Generic.IEnumerable<string> expand, Azure.RequestContext context) { throw null; }
+        public virtual Azure.Pageable<Azure.Compute.Batch.BatchJobSchedule> GetJobSchedules(int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), int? maxresults = default(int?), string filter = null, System.Collections.Generic.IEnumerable<string> select = null, System.Collections.Generic.IEnumerable<string> expand = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual Azure.AsyncPageable<System.BinaryData> GetJobSchedulesAsync(int? timeOutInSeconds, System.DateTimeOffset? ocpdate, int? maxresults, string filter, System.Collections.Generic.IEnumerable<string> select, System.Collections.Generic.IEnumerable<string> expand, Azure.RequestContext context) { throw null; }
+        public virtual Azure.AsyncPageable<Azure.Compute.Batch.BatchJobSchedule> GetJobSchedulesAsync(int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), int? maxresults = default(int?), string filter = null, System.Collections.Generic.IEnumerable<string> select = null, System.Collections.Generic.IEnumerable<string> expand = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual Azure.Pageable<System.BinaryData> GetJobsFromSchedules(string jobScheduleId, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, int? maxresults, string filter, System.Collections.Generic.IEnumerable<string> select, System.Collections.Generic.IEnumerable<string> expand, Azure.RequestContext context) { throw null; }
+        public virtual Azure.Pageable<Azure.Compute.Batch.BatchJob> GetJobsFromSchedules(string jobScheduleId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), int? maxresults = default(int?), string filter = null, System.Collections.Generic.IEnumerable<string> select = null, System.Collections.Generic.IEnumerable<string> expand = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual Azure.AsyncPageable<System.BinaryData> GetJobsFromSchedulesAsync(string jobScheduleId, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, int? maxresults, string filter, System.Collections.Generic.IEnumerable<string> select, System.Collections.Generic.IEnumerable<string> expand, Azure.RequestContext context) { throw null; }
+        public virtual Azure.AsyncPageable<Azure.Compute.Batch.BatchJob> GetJobsFromSchedulesAsync(string jobScheduleId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), int? maxresults = default(int?), string filter = null, System.Collections.Generic.IEnumerable<string> select = null, System.Collections.Generic.IEnumerable<string> expand = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual Azure.Response GetJobTaskCounts(string jobId, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, Azure.RequestContext context) { throw null; }
+        public virtual Azure.Response<Azure.Compute.Batch.BatchTaskCountsResult> GetJobTaskCounts(string jobId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> GetJobTaskCountsAsync(string jobId, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, Azure.RequestContext context) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response<Azure.Compute.Batch.BatchTaskCountsResult>> GetJobTaskCountsAsync(string jobId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual Azure.Response GetNode(string poolId, string nodeId, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, System.Collections.Generic.IEnumerable<string> select, Azure.RequestContext context) { throw null; }
+        public virtual Azure.Response<Azure.Compute.Batch.BatchNode> GetNode(string poolId, string nodeId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Collections.Generic.IEnumerable<string> select = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> GetNodeAsync(string poolId, string nodeId, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, System.Collections.Generic.IEnumerable<string> select, Azure.RequestContext context) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response<Azure.Compute.Batch.BatchNode>> GetNodeAsync(string poolId, string nodeId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Collections.Generic.IEnumerable<string> select = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual Azure.Response GetNodeExtension(string poolId, string nodeId, string extensionName, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, System.Collections.Generic.IEnumerable<string> select, Azure.RequestContext context) { throw null; }
+        public virtual Azure.Response<Azure.Compute.Batch.BatchNodeVMExtension> GetNodeExtension(string poolId, string nodeId, string extensionName, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Collections.Generic.IEnumerable<string> select = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> GetNodeExtensionAsync(string poolId, string nodeId, string extensionName, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, System.Collections.Generic.IEnumerable<string> select, Azure.RequestContext context) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response<Azure.Compute.Batch.BatchNodeVMExtension>> GetNodeExtensionAsync(string poolId, string nodeId, string extensionName, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Collections.Generic.IEnumerable<string> select = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual Azure.Pageable<System.BinaryData> GetNodeExtensions(string poolId, string nodeId, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, int? maxresults, System.Collections.Generic.IEnumerable<string> select, Azure.RequestContext context) { throw null; }
+        public virtual Azure.Pageable<Azure.Compute.Batch.BatchNodeVMExtension> GetNodeExtensions(string poolId, string nodeId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), int? maxresults = default(int?), System.Collections.Generic.IEnumerable<string> select = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual Azure.AsyncPageable<System.BinaryData> GetNodeExtensionsAsync(string poolId, string nodeId, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, int? maxresults, System.Collections.Generic.IEnumerable<string> select, Azure.RequestContext context) { throw null; }
+        public virtual Azure.AsyncPageable<Azure.Compute.Batch.BatchNodeVMExtension> GetNodeExtensionsAsync(string poolId, string nodeId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), int? maxresults = default(int?), System.Collections.Generic.IEnumerable<string> select = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual Azure.Response GetNodeFile(string poolId, string nodeId, string filePath, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, string ocpRange, Azure.RequestConditions requestConditions, Azure.RequestContext context) { throw null; }
+        public virtual Azure.Response<System.BinaryData> GetNodeFile(string poolId, string nodeId, string filePath, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), string ocpRange = null, Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+        public virtual System.Threading.Tasks.Task<Azure.Response> GetNodeFileAsync(string poolId, string nodeId, string filePath, int?
timeOutInSeconds, System.DateTimeOffset? ocpdate, string ocpRange, Azure.RequestConditions requestConditions, Azure.RequestContext context) { throw null; } + public virtual System.Threading.Tasks.Task> GetNodeFileAsync(string poolId, string nodeId, string filePath, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), string ocpRange = null, Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response GetNodeFileProperties(string poolId, string nodeId, string filePath, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task> GetNodeFilePropertiesAsync(string poolId, string nodeId, string filePath, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Pageable GetNodeFiles(string poolId, string nodeId, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, int? maxresults, string filter, bool? recursive, Azure.RequestContext context) { throw null; } + public virtual Azure.Pageable GetNodeFiles(string poolId, string nodeId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), int? maxresults = default(int?), string filter = null, bool? recursive = default(bool?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.AsyncPageable GetNodeFilesAsync(string poolId, string nodeId, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, int? maxresults, string filter, bool? recursive, Azure.RequestContext context) { throw null; } + public virtual Azure.AsyncPageable GetNodeFilesAsync(string poolId, string nodeId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), int? maxresults = default(int?), string filter = null, bool? recursive = default(bool?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response GetNodeRemoteLoginSettings(string poolId, string nodeId, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, Azure.RequestContext context) { throw null; } + public virtual Azure.Response GetNodeRemoteLoginSettings(string poolId, string nodeId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task GetNodeRemoteLoginSettingsAsync(string poolId, string nodeId, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, Azure.RequestContext context) { throw null; } + public virtual System.Threading.Tasks.Task> GetNodeRemoteLoginSettingsAsync(string poolId, string nodeId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? 
ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Pageable GetNodes(string poolId, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, int? maxresults, string filter, System.Collections.Generic.IEnumerable select, Azure.RequestContext context) { throw null; } + public virtual Azure.Pageable GetNodes(string poolId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), int? maxresults = default(int?), string filter = null, System.Collections.Generic.IEnumerable select = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.AsyncPageable GetNodesAsync(string poolId, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, int? maxresults, string filter, System.Collections.Generic.IEnumerable select, Azure.RequestContext context) { throw null; } + public virtual Azure.AsyncPageable GetNodesAsync(string poolId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), int? maxresults = default(int?), string filter = null, System.Collections.Generic.IEnumerable select = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response GetPool(string poolId, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, System.Collections.Generic.IEnumerable select, System.Collections.Generic.IEnumerable expand, Azure.RequestConditions requestConditions, Azure.RequestContext context) { throw null; } + public virtual Azure.Response GetPool(string poolId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Collections.Generic.IEnumerable select = null, System.Collections.Generic.IEnumerable expand = null, Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task GetPoolAsync(string poolId, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, System.Collections.Generic.IEnumerable select, System.Collections.Generic.IEnumerable expand, Azure.RequestConditions requestConditions, Azure.RequestContext context) { throw null; } + public virtual System.Threading.Tasks.Task> GetPoolAsync(string poolId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Collections.Generic.IEnumerable select = null, System.Collections.Generic.IEnumerable expand = null, Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Pageable GetPoolNodeCounts(int? timeOutInSeconds, System.DateTimeOffset? ocpdate, int? maxresults, string filter, Azure.RequestContext context) { throw null; } + public virtual Azure.Pageable GetPoolNodeCounts(int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), int? maxresults = default(int?), string filter = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.AsyncPageable GetPoolNodeCountsAsync(int? timeOutInSeconds, System.DateTimeOffset? ocpdate, int? 
maxresults, string filter, Azure.RequestContext context) { throw null; } + public virtual Azure.AsyncPageable GetPoolNodeCountsAsync(int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), int? maxresults = default(int?), string filter = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Pageable GetPools(int? timeOutInSeconds, System.DateTimeOffset? ocpdate, int? maxresults, string filter, System.Collections.Generic.IEnumerable select, System.Collections.Generic.IEnumerable expand, Azure.RequestContext context) { throw null; } + public virtual Azure.Pageable GetPools(int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), int? maxresults = default(int?), string filter = null, System.Collections.Generic.IEnumerable select = null, System.Collections.Generic.IEnumerable expand = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.AsyncPageable GetPoolsAsync(int? timeOutInSeconds, System.DateTimeOffset? ocpdate, int? maxresults, string filter, System.Collections.Generic.IEnumerable select, System.Collections.Generic.IEnumerable expand, Azure.RequestContext context) { throw null; } + public virtual Azure.AsyncPageable GetPoolsAsync(int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), int? maxresults = default(int?), string filter = null, System.Collections.Generic.IEnumerable select = null, System.Collections.Generic.IEnumerable expand = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Pageable GetPoolUsageMetrics(int? timeOutInSeconds, System.DateTimeOffset? ocpdate, int? maxresults, System.DateTimeOffset? starttime, System.DateTimeOffset? endtime, string filter, Azure.RequestContext context) { throw null; } + public virtual Azure.Pageable GetPoolUsageMetrics(int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), int? maxresults = default(int?), System.DateTimeOffset? starttime = default(System.DateTimeOffset?), System.DateTimeOffset? endtime = default(System.DateTimeOffset?), string filter = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.AsyncPageable GetPoolUsageMetricsAsync(int? timeOutInSeconds, System.DateTimeOffset? ocpdate, int? maxresults, System.DateTimeOffset? starttime, System.DateTimeOffset? endtime, string filter, Azure.RequestContext context) { throw null; } + public virtual Azure.AsyncPageable GetPoolUsageMetricsAsync(int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), int? maxresults = default(int?), System.DateTimeOffset? starttime = default(System.DateTimeOffset?), System.DateTimeOffset? endtime = default(System.DateTimeOffset?), string filter = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Pageable GetSubTasks(string jobId, string taskId, int? timeOutInSeconds, System.DateTimeOffset? 
ocpdate, System.Collections.Generic.IEnumerable select, Azure.RequestContext context) { throw null; } + public virtual Azure.Pageable GetSubTasks(string jobId, string taskId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Collections.Generic.IEnumerable select = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.AsyncPageable GetSubTasksAsync(string jobId, string taskId, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, System.Collections.Generic.IEnumerable select, Azure.RequestContext context) { throw null; } + public virtual Azure.AsyncPageable GetSubTasksAsync(string jobId, string taskId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Collections.Generic.IEnumerable select = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Pageable GetSupportedImages(int? timeOutInSeconds, System.DateTimeOffset? ocpdate, int? maxresults, string filter, Azure.RequestContext context) { throw null; } + public virtual Azure.Pageable GetSupportedImages(int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), int? maxresults = default(int?), string filter = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.AsyncPageable GetSupportedImagesAsync(int? timeOutInSeconds, System.DateTimeOffset? ocpdate, int? maxresults, string filter, Azure.RequestContext context) { throw null; } + public virtual Azure.AsyncPageable GetSupportedImagesAsync(int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), int? maxresults = default(int?), string filter = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response GetTask(string jobId, string taskId, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, System.Collections.Generic.IEnumerable select, System.Collections.Generic.IEnumerable expand, Azure.RequestConditions requestConditions, Azure.RequestContext context) { throw null; } + public virtual Azure.Response GetTask(string jobId, string taskId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Collections.Generic.IEnumerable select = null, System.Collections.Generic.IEnumerable expand = null, Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task GetTaskAsync(string jobId, string taskId, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, System.Collections.Generic.IEnumerable select, System.Collections.Generic.IEnumerable expand, Azure.RequestConditions requestConditions, Azure.RequestContext context) { throw null; } + public virtual System.Threading.Tasks.Task> GetTaskAsync(string jobId, string taskId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? 
ocpdate = default(System.DateTimeOffset?), System.Collections.Generic.IEnumerable select = null, System.Collections.Generic.IEnumerable expand = null, Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response GetTaskFile(string jobId, string taskId, string filePath, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, string ocpRange, Azure.RequestConditions requestConditions, Azure.RequestContext context) { throw null; } + public virtual Azure.Response GetTaskFile(string jobId, string taskId, string filePath, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), string ocpRange = null, Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task GetTaskFileAsync(string jobId, string taskId, string filePath, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, string ocpRange, Azure.RequestConditions requestConditions, Azure.RequestContext context) { throw null; } + public virtual System.Threading.Tasks.Task> GetTaskFileAsync(string jobId, string taskId, string filePath, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), string ocpRange = null, Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response GetTaskFileProperties(string jobId, string taskId, string filePath, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task> GetTaskFilePropertiesAsync(string jobId, string taskId, string filePath, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Pageable GetTaskFiles(string jobId, string taskId, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, int? maxresults, string filter, bool? recursive, Azure.RequestContext context) { throw null; } + public virtual Azure.Pageable GetTaskFiles(string jobId, string taskId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), int? maxresults = default(int?), string filter = null, bool? recursive = default(bool?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.AsyncPageable GetTaskFilesAsync(string jobId, string taskId, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, int? maxresults, string filter, bool? recursive, Azure.RequestContext context) { throw null; } + public virtual Azure.AsyncPageable GetTaskFilesAsync(string jobId, string taskId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), int? maxresults = default(int?), string filter = null, bool? 
recursive = default(bool?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Pageable GetTasks(string jobId, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, int? maxresults, string filter, System.Collections.Generic.IEnumerable select, System.Collections.Generic.IEnumerable expand, Azure.RequestContext context) { throw null; } + public virtual Azure.Pageable GetTasks(string jobId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), int? maxresults = default(int?), string filter = null, System.Collections.Generic.IEnumerable select = null, System.Collections.Generic.IEnumerable expand = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.AsyncPageable GetTasksAsync(string jobId, int? timeOutInSeconds, System.DateTimeOffset? ocpdate, int? maxresults, string filter, System.Collections.Generic.IEnumerable select, System.Collections.Generic.IEnumerable expand, Azure.RequestContext context) { throw null; } + public virtual Azure.AsyncPageable GetTasksAsync(string jobId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), int? maxresults = default(int?), string filter = null, System.Collections.Generic.IEnumerable select = null, System.Collections.Generic.IEnumerable expand = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response JobScheduleExists(string jobScheduleId, int? timeOut = default(int?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } + public virtual System.Threading.Tasks.Task> JobScheduleExistsAsync(string jobScheduleId, int? timeOut = default(int?), System.DateTimeOffset? ocpDate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } + public virtual Azure.Response PoolExists(string poolId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } + public virtual System.Threading.Tasks.Task> PoolExistsAsync(string poolId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } + public virtual Azure.Response ReactivateTask(string jobId, string taskId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } + public virtual System.Threading.Tasks.Task ReactivateTaskAsync(string jobId, string taskId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } + public virtual Azure.Response RebootNode(string poolId, string nodeId, Azure.Compute.Batch.BatchNodeRebootContent parameters = null, int? timeOutInSeconds = default(int?), System.DateTimeOffset? 
ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response RebootNode(string poolId, string nodeId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; } + public virtual System.Threading.Tasks.Task RebootNodeAsync(string poolId, string nodeId, Azure.Compute.Batch.BatchNodeRebootContent parameters = null, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task RebootNodeAsync(string poolId, string nodeId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; } + public virtual Azure.Response RemoveNodes(string poolId, Azure.Compute.Batch.BatchNodeRemoveContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response RemoveNodes(string poolId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } + public virtual System.Threading.Tasks.Task RemoveNodesAsync(string poolId, Azure.Compute.Batch.BatchNodeRemoveContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task RemoveNodesAsync(string poolId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } + public virtual Azure.Response ReplaceJob(string jobId, Azure.Compute.Batch.BatchJob job, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response ReplaceJob(string jobId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } + public virtual System.Threading.Tasks.Task ReplaceJobAsync(string jobId, Azure.Compute.Batch.BatchJob job, int? timeOutInSeconds = default(int?), System.DateTimeOffset? 
ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task ReplaceJobAsync(string jobId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } + public virtual Azure.Response ReplaceJobSchedule(string jobScheduleId, Azure.Compute.Batch.BatchJobSchedule jobSchedule, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response ReplaceJobSchedule(string jobScheduleId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } + public virtual System.Threading.Tasks.Task ReplaceJobScheduleAsync(string jobScheduleId, Azure.Compute.Batch.BatchJobSchedule jobSchedule, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task ReplaceJobScheduleAsync(string jobScheduleId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } + public virtual Azure.Response ReplaceNodeUser(string poolId, string nodeId, string userName, Azure.Compute.Batch.BatchNodeUserUpdateContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response ReplaceNodeUser(string poolId, string nodeId, string userName, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; } + public virtual System.Threading.Tasks.Task ReplaceNodeUserAsync(string poolId, string nodeId, string userName, Azure.Compute.Batch.BatchNodeUserUpdateContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task ReplaceNodeUserAsync(string poolId, string nodeId, string userName, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; } + public virtual Azure.Response ReplacePoolProperties(string poolId, Azure.Compute.Batch.BatchPoolReplaceContent pool, int? timeOutInSeconds = default(int?), System.DateTimeOffset? 
ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response ReplacePoolProperties(string poolId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; } + public virtual System.Threading.Tasks.Task ReplacePoolPropertiesAsync(string poolId, Azure.Compute.Batch.BatchPoolReplaceContent pool, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task ReplacePoolPropertiesAsync(string poolId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; } + public virtual Azure.Response ReplaceTask(string jobId, string taskId, Azure.Compute.Batch.BatchTask task, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response ReplaceTask(string jobId, string taskId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } + public virtual System.Threading.Tasks.Task ReplaceTaskAsync(string jobId, string taskId, Azure.Compute.Batch.BatchTask task, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task ReplaceTaskAsync(string jobId, string taskId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } + public virtual Azure.Response ResizePool(string poolId, Azure.Compute.Batch.BatchPoolResizeContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response ResizePool(string poolId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } + public virtual System.Threading.Tasks.Task ResizePoolAsync(string poolId, Azure.Compute.Batch.BatchPoolResizeContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? 
ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task ResizePoolAsync(string poolId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } + public virtual Azure.Response StopPoolResize(string poolId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } + public virtual System.Threading.Tasks.Task StopPoolResizeAsync(string poolId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } + public virtual Azure.Response TerminateJob(string jobId, Azure.Compute.Batch.BatchJobTerminateContent parameters = null, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response TerminateJob(string jobId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } + public virtual System.Threading.Tasks.Task TerminateJobAsync(string jobId, Azure.Compute.Batch.BatchJobTerminateContent parameters = null, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task TerminateJobAsync(string jobId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } + public virtual Azure.Response TerminateJobSchedule(string jobScheduleId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } + public virtual System.Threading.Tasks.Task TerminateJobScheduleAsync(string jobScheduleId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } + public virtual Azure.Response TerminateTask(string jobId, string taskId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } + public virtual System.Threading.Tasks.Task TerminateTaskAsync(string jobId, string taskId, int? timeOutInSeconds = default(int?), System.DateTimeOffset? 
ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } + public virtual Azure.Response UpdateJob(string jobId, Azure.Compute.Batch.BatchJobUpdateContent job, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response UpdateJob(string jobId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } + public virtual System.Threading.Tasks.Task UpdateJobAsync(string jobId, Azure.Compute.Batch.BatchJobUpdateContent job, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task UpdateJobAsync(string jobId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } + public virtual Azure.Response UpdateJobSchedule(string jobScheduleId, Azure.Compute.Batch.BatchJobScheduleUpdateContent jobSchedule, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response UpdateJobSchedule(string jobScheduleId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } + public virtual System.Threading.Tasks.Task UpdateJobScheduleAsync(string jobScheduleId, Azure.Compute.Batch.BatchJobScheduleUpdateContent jobSchedule, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual System.Threading.Tasks.Task UpdateJobScheduleAsync(string jobScheduleId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; } + public virtual Azure.Response UpdatePool(string poolId, Azure.Compute.Batch.BatchPoolUpdateContent pool, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; } + public virtual Azure.Response UpdatePool(string poolId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? 
ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; }
+ public virtual System.Threading.Tasks.Task<Azure.Response> UpdatePoolAsync(string poolId, Azure.Compute.Batch.BatchPoolUpdateContent pool, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+ public virtual System.Threading.Tasks.Task<Azure.Response> UpdatePoolAsync(string poolId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestConditions requestConditions = null, Azure.RequestContext context = null) { throw null; }
+ public virtual Azure.Response<Azure.Compute.Batch.UploadBatchServiceLogsResult> UploadNodeLogs(string poolId, string nodeId, Azure.Compute.Batch.UploadBatchServiceLogsContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+ public virtual Azure.Response UploadNodeLogs(string poolId, string nodeId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; }
+ public virtual System.Threading.Tasks.Task<Azure.Response<Azure.Compute.Batch.UploadBatchServiceLogsResult>> UploadNodeLogsAsync(string poolId, string nodeId, Azure.Compute.Batch.UploadBatchServiceLogsContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), System.Threading.CancellationToken cancellationToken = default(System.Threading.CancellationToken)) { throw null; }
+ public virtual System.Threading.Tasks.Task<Azure.Response> UploadNodeLogsAsync(string poolId, string nodeId, Azure.Core.RequestContent content, int? timeOutInSeconds = default(int?), System.DateTimeOffset? ocpdate = default(System.DateTimeOffset?), Azure.RequestContext context = null) { throw null; }
+ }
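+ // Illustrative usage sketch (comments only, not part of the generated surface):
+ // the paired overloads above follow the Azure SDK convenience/protocol split.
+ // Convenience overloads take a CancellationToken and return typed models;
+ // protocol overloads take RequestContent/RequestContext and return raw
+ // responses. Assuming a constructed BatchClient named "client":
+ //
+ //   foreach (BatchJob job in client.GetJobs(filter: "state eq 'active'"))
+ //   {
+ //       Console.WriteLine($"{job.Id}: {job.State}");
+ //   }
+ //
+ //   await foreach (BatchPool pool in client.GetPoolsAsync())
+ //   {
+ //       Console.WriteLine(pool.Id);
+ //   }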
+ public partial class BatchClientOptions : Azure.Core.ClientOptions
+ {
+ public BatchClientOptions(Azure.Compute.Batch.BatchClientOptions.ServiceVersion version = Azure.Compute.Batch.BatchClientOptions.ServiceVersion.V2024_02_01_19_0) { }
+ public enum ServiceVersion
+ {
+ V2024_02_01_19_0 = 1,
+ }
+ }
+ public partial class BatchError : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchError>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchError>
+ {
+ internal BatchError() { }
+ public string Code { get { throw null; } }
+ public Azure.Compute.Batch.BatchErrorMessage Message { get { throw null; } }
+ public System.Collections.Generic.IReadOnlyList<Azure.Compute.Batch.BatchErrorDetail> Values { get { throw null; } }
+ Azure.Compute.Batch.BatchError System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchError>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+ void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchError>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+ Azure.Compute.Batch.BatchError System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchError>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+ string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchError>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+ System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchError>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+ }
+ public partial class BatchErrorDetail : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchErrorDetail>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchErrorDetail>
+ {
+ internal BatchErrorDetail() { }
+ public string Key { get { throw null; } }
+ public string Value { get { throw null; } }
+ Azure.Compute.Batch.BatchErrorDetail System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchErrorDetail>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+ void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchErrorDetail>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+ Azure.Compute.Batch.BatchErrorDetail System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchErrorDetail>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+ string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchErrorDetail>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+ System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchErrorDetail>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+ }
+ public partial class BatchErrorMessage : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchErrorMessage>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchErrorMessage>
+ {
+ internal BatchErrorMessage() { }
+ public string Lang { get { throw null; } }
+ public string Value { get { throw null; } }
+ Azure.Compute.Batch.BatchErrorMessage System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchErrorMessage>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+ void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchErrorMessage>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+ Azure.Compute.Batch.BatchErrorMessage System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchErrorMessage>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+ string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchErrorMessage>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+ System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchErrorMessage>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+ }
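+ // Illustrative usage sketch (comments only): BatchClientOptions above pins the
+ // service API version. Assuming the endpoint/credential constructor from the
+ // client surface earlier in this file, and Azure.Identity for the credential:
+ //
+ //   var options = new BatchClientOptions(BatchClientOptions.ServiceVersion.V2024_02_01_19_0);
+ //   var client = new BatchClient(
+ //       new Uri("https://<account>.<region>.batch.azure.com"),
+ //       new DefaultAzureCredential(),
+ //       options);
+ //
+ // The BatchError, BatchErrorDetail, and BatchErrorMessage models above describe
+ // the error body the service returns for failed requests.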
+ public partial class BatchFileProperties
+ {
+ internal BatchFileProperties() { }
+ public bool BatchFileIsDirectory { get { throw null; } }
+ public string BatchFileMode { get { throw null; } }
+ public string BatchFileUrl { get { throw null; } }
+ public System.DateTime CreationTime { get { throw null; } }
+ }
+ public partial class BatchJob : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJob>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJob>
+ {
+ public BatchJob(Azure.Compute.Batch.BatchPoolInfo poolInfo) { }
+ public bool? AllowTaskPreemption { get { throw null; } set { } }
+ public System.Collections.Generic.IReadOnlyList<Azure.Compute.Batch.EnvironmentSetting> CommonEnvironmentSettings { get { throw null; } }
+ public Azure.Compute.Batch.BatchJobConstraints Constraints { get { throw null; } set { } }
+ public System.DateTimeOffset? CreationTime { get { throw null; } }
+ public string DisplayName { get { throw null; } }
+ public string ETag { get { throw null; } }
+ public Azure.Compute.Batch.BatchJobExecutionInfo ExecutionInfo { get { throw null; } }
+ public string Id { get { throw null; } }
+ public Azure.Compute.Batch.BatchJobManagerTask JobManagerTask { get { throw null; } }
+ public Azure.Compute.Batch.BatchJobPreparationTask JobPreparationTask { get { throw null; } }
+ public Azure.Compute.Batch.BatchJobReleaseTask JobReleaseTask { get { throw null; } }
+ public System.DateTimeOffset? LastModified { get { throw null; } }
+ public int? MaxParallelTasks { get { throw null; } set { } }
+ public System.Collections.Generic.IList<Azure.Compute.Batch.MetadataItem> Metadata { get { throw null; } }
+ public Azure.Compute.Batch.BatchJobNetworkConfiguration NetworkConfiguration { get { throw null; } }
+ public Azure.Compute.Batch.OnAllBatchTasksComplete? OnAllTasksComplete { get { throw null; } set { } }
+ public Azure.Compute.Batch.OnBatchTaskFailure? OnTaskFailure { get { throw null; } }
+ public Azure.Compute.Batch.BatchPoolInfo PoolInfo { get { throw null; } set { } }
+ public Azure.Compute.Batch.BatchJobState? PreviousState { get { throw null; } }
+ public System.DateTimeOffset? PreviousStateTransitionTime { get { throw null; } }
+ public int? Priority { get { throw null; } set { } }
+ public Azure.Compute.Batch.BatchJobState? State { get { throw null; } }
+ public System.DateTimeOffset? StateTransitionTime { get { throw null; } }
+ public Azure.Compute.Batch.BatchJobStatistics Stats { get { throw null; } }
+ public string Url { get { throw null; } }
+ public bool? UsesTaskDependencies { get { throw null; } }
+ Azure.Compute.Batch.BatchJob System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJob>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+ void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJob>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+ Azure.Compute.Batch.BatchJob System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJob>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+ string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJob>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+ System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJob>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+ }
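+ // Illustrative usage sketch (comments only): most BatchJob members are
+ // read-only snapshots; the settable ones (Priority, Constraints, PoolInfo,
+ // OnAllTasksComplete, ...) can be modified and sent back via ReplaceJob.
+ // Assuming the same "client" as above:
+ //
+ //   BatchJob job = client.GetJob("job-1");
+ //   job.Priority = 100;
+ //   client.ReplaceJob("job-1", job);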
+ [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)]
+ public readonly partial struct BatchJobAction : System.IEquatable<Azure.Compute.Batch.BatchJobAction>
+ {
+ private readonly object _dummy;
+ private readonly int _dummyPrimitive;
+ public BatchJobAction(string value) { throw null; }
+ public static Azure.Compute.Batch.BatchJobAction Disable { get { throw null; } }
+ public static Azure.Compute.Batch.BatchJobAction None { get { throw null; } }
+ public static Azure.Compute.Batch.BatchJobAction Terminate { get { throw null; } }
+ public bool Equals(Azure.Compute.Batch.BatchJobAction other) { throw null; }
+ [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+ public override bool Equals(object obj) { throw null; }
+ [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+ public override int GetHashCode() { throw null; }
+ public static bool operator ==(Azure.Compute.Batch.BatchJobAction left, Azure.Compute.Batch.BatchJobAction right) { throw null; }
+ public static implicit operator Azure.Compute.Batch.BatchJobAction (string value) { throw null; }
+ public static bool operator !=(Azure.Compute.Batch.BatchJobAction left, Azure.Compute.Batch.BatchJobAction right) { throw null; }
+ public override string ToString() { throw null; }
+ }
+ public partial class BatchJobConstraints : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobConstraints>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobConstraints>
+ {
+ public BatchJobConstraints() { }
+ public int? MaxTaskRetryCount { get { throw null; } set { } }
+ public System.TimeSpan? MaxWallClockTime { get { throw null; } set { } }
+ Azure.Compute.Batch.BatchJobConstraints System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobConstraints>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+ void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobConstraints>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+ Azure.Compute.Batch.BatchJobConstraints System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobConstraints>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+ string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobConstraints>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+ System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobConstraints>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+ }
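+ // Illustrative usage sketch (comments only): BatchJobConstraints above caps
+ // per-task retries and total job wall-clock time:
+ //
+ //   var constraints = new BatchJobConstraints
+ //   {
+ //       MaxTaskRetryCount = 3,
+ //       MaxWallClockTime = TimeSpan.FromHours(2),
+ //   };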
+ public partial class BatchJobCreateContent : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobCreateContent>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobCreateContent>
+ {
+ public BatchJobCreateContent(string id, Azure.Compute.Batch.BatchPoolInfo poolInfo) { }
+ public bool? AllowTaskPreemption { get { throw null; } set { } }
+ public System.Collections.Generic.IList<Azure.Compute.Batch.EnvironmentSetting> CommonEnvironmentSettings { get { throw null; } }
+ public Azure.Compute.Batch.BatchJobConstraints Constraints { get { throw null; } set { } }
+ public string DisplayName { get { throw null; } set { } }
+ public string Id { get { throw null; } }
+ public Azure.Compute.Batch.BatchJobManagerTask JobManagerTask { get { throw null; } set { } }
+ public Azure.Compute.Batch.BatchJobPreparationTask JobPreparationTask { get { throw null; } set { } }
+ public Azure.Compute.Batch.BatchJobReleaseTask JobReleaseTask { get { throw null; } set { } }
+ public int? MaxParallelTasks { get { throw null; } set { } }
+ public System.Collections.Generic.IList<Azure.Compute.Batch.MetadataItem> Metadata { get { throw null; } }
+ public Azure.Compute.Batch.BatchJobNetworkConfiguration NetworkConfiguration { get { throw null; } set { } }
+ public Azure.Compute.Batch.OnAllBatchTasksComplete? OnAllTasksComplete { get { throw null; } set { } }
+ public Azure.Compute.Batch.OnBatchTaskFailure? OnTaskFailure { get { throw null; } set { } }
+ public Azure.Compute.Batch.BatchPoolInfo PoolInfo { get { throw null; } }
+ public int? Priority { get { throw null; } set { } }
+ public bool? UsesTaskDependencies { get { throw null; } set { } }
+ Azure.Compute.Batch.BatchJobCreateContent System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobCreateContent>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+ void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobCreateContent>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+ Azure.Compute.Batch.BatchJobCreateContent System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobCreateContent>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+ string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobCreateContent>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+ System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobCreateContent>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+ }
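+ // Illustrative usage sketch (comments only): creating a job against an
+ // existing pool. Assumes the client's CreateJob method from earlier in this
+ // surface and that BatchPoolInfo exposes a settable PoolId:
+ //
+ //   var poolInfo = new BatchPoolInfo { PoolId = "pool-1" };
+ //   var jobContent = new BatchJobCreateContent("job-1", poolInfo) { Priority = 0 };
+ //   client.CreateJob(jobContent);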
+ public partial class BatchJobCreateContent : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobCreateContent>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobCreateContent> + { + public BatchJobCreateContent(string id, Azure.Compute.Batch.BatchPoolInfo poolInfo) { } + public bool? AllowTaskPreemption { get { throw null; } set { } } + public System.Collections.Generic.IList<Azure.Compute.Batch.EnvironmentSetting> CommonEnvironmentSettings { get { throw null; } } + public Azure.Compute.Batch.BatchJobConstraints Constraints { get { throw null; } set { } } + public string DisplayName { get { throw null; } set { } } + public string Id { get { throw null; } } + public Azure.Compute.Batch.BatchJobManagerTask JobManagerTask { get { throw null; } set { } } + public Azure.Compute.Batch.BatchJobPreparationTask JobPreparationTask { get { throw null; } set { } } + public Azure.Compute.Batch.BatchJobReleaseTask JobReleaseTask { get { throw null; } set { } } + public int? MaxParallelTasks { get { throw null; } set { } } + public System.Collections.Generic.IList<Azure.Compute.Batch.MetadataItem> Metadata { get { throw null; } } + public Azure.Compute.Batch.BatchJobNetworkConfiguration NetworkConfiguration { get { throw null; } set { } } + public Azure.Compute.Batch.OnAllBatchTasksComplete? OnAllTasksComplete { get { throw null; } set { } } + public Azure.Compute.Batch.OnBatchTaskFailure? OnTaskFailure { get { throw null; } set { } } + public Azure.Compute.Batch.BatchPoolInfo PoolInfo { get { throw null; } } + public int? Priority { get { throw null; } set { } } + public bool? UsesTaskDependencies { get { throw null; } set { } } + Azure.Compute.Batch.BatchJobCreateContent System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobCreateContent>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobCreateContent>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchJobCreateContent System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobCreateContent>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobCreateContent>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobCreateContent>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class BatchJobDisableContent : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobDisableContent>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobDisableContent> + { + public BatchJobDisableContent(Azure.Compute.Batch.DisableBatchJobOption disableTasks) { } + public Azure.Compute.Batch.DisableBatchJobOption DisableTasks { get { throw null; } } + Azure.Compute.Batch.BatchJobDisableContent System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobDisableContent>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobDisableContent>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchJobDisableContent System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobDisableContent>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobDisableContent>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobDisableContent>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + }
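// --- Editor's illustrative sketch (not part of the generated API file). Creating a
// job from the BatchJobCreateContent declared above and then disabling it.
// BatchClient, its CreateJob/DisableJob methods, BatchPoolInfo.PoolId, and the
// DisableBatchJobOption.Requeue member are assumptions about parts of this package
// declared elsewhere in the diff; the endpoint and IDs are placeholders.
using System;
using Azure.Compute.Batch;
using Azure.Identity;

var client = new BatchClient(
    new Uri("https://<account>.<region>.batch.azure.com"), new DefaultAzureCredential());
var job = new BatchJobCreateContent("hello-job", new BatchPoolInfo { PoolId = "hello-pool" })
{
    Priority = 100,
    Constraints = new BatchJobConstraints { MaxTaskRetryCount = 2 }
};
client.CreateJob(job);
// Requeue returns the job's running tasks to the queue instead of terminating them.
client.DisableJob("hello-job", new BatchJobDisableContent(DisableBatchJobOption.Requeue));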
+ public partial class BatchJobExecutionInfo : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobExecutionInfo>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobExecutionInfo> + { + public BatchJobExecutionInfo(System.DateTimeOffset startTime) { } + public System.DateTimeOffset? EndTime { get { throw null; } set { } } + public string PoolId { get { throw null; } set { } } + public Azure.Compute.Batch.BatchJobSchedulingError SchedulingError { get { throw null; } set { } } + public System.DateTimeOffset StartTime { get { throw null; } set { } } + public string TerminationReason { get { throw null; } set { } } + Azure.Compute.Batch.BatchJobExecutionInfo System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobExecutionInfo>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobExecutionInfo>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchJobExecutionInfo System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobExecutionInfo>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobExecutionInfo>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobExecutionInfo>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class BatchJobManagerTask : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobManagerTask>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobManagerTask> + { + public BatchJobManagerTask(string id, string commandLine) { } + public bool? AllowLowPriorityNode { get { throw null; } set { } } + public System.Collections.Generic.IList<Azure.Compute.Batch.BatchApplicationPackageReference> ApplicationPackageReferences { get { throw null; } } + public Azure.Compute.Batch.AuthenticationTokenSettings AuthenticationTokenSettings { get { throw null; } set { } } + public string CommandLine { get { throw null; } set { } } + public Azure.Compute.Batch.BatchTaskConstraints Constraints { get { throw null; } set { } } + public Azure.Compute.Batch.BatchTaskContainerSettings ContainerSettings { get { throw null; } set { } } + public string DisplayName { get { throw null; } set { } } + public System.Collections.Generic.IList<Azure.Compute.Batch.EnvironmentSetting> EnvironmentSettings { get { throw null; } } + public string Id { get { throw null; } set { } } + public bool? KillJobOnCompletion { get { throw null; } set { } } + public System.Collections.Generic.IList<Azure.Compute.Batch.OutputFile> OutputFiles { get { throw null; } } + public int? RequiredSlots { get { throw null; } set { } } + public System.Collections.Generic.IList<Azure.Compute.Batch.ResourceFile> ResourceFiles { get { throw null; } } + public bool?
RunExclusive { get { throw null; } set { } } + public Azure.Compute.Batch.UserIdentity UserIdentity { get { throw null; } set { } } + Azure.Compute.Batch.BatchJobManagerTask System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobManagerTask>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobManagerTask>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchJobManagerTask System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobManagerTask>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobManagerTask>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobManagerTask>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class BatchJobNetworkConfiguration : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobNetworkConfiguration>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobNetworkConfiguration> + { + public BatchJobNetworkConfiguration(string subnetId) { } + public string SubnetId { get { throw null; } set { } } + Azure.Compute.Batch.BatchJobNetworkConfiguration System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobNetworkConfiguration>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobNetworkConfiguration>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchJobNetworkConfiguration System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobNetworkConfiguration>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobNetworkConfiguration>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobNetworkConfiguration>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + }
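// --- Editor's illustrative sketch (not part of the generated API file). A job
// manager task runs first on one node of the job's pool; the command line and the
// subnet resource ID below are placeholders.
using Azure.Compute.Batch;

var managerTask = new BatchJobManagerTask("job-manager", "/bin/sh -c 'echo coordinating work'")
{
    RunExclusive = true,         // no other task slots are used on the node while it runs
    KillJobOnCompletion = false, // keep the job active after the manager exits
    RequiredSlots = 1
};
// BatchJobNetworkConfiguration pins the job's nodes to a virtual network subnet.
var networkConfiguration = new BatchJobNetworkConfiguration(
    "/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Network/virtualNetworks/<vnet>/subnets/<subnet>");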
+ public partial class BatchJobPreparationAndReleaseTaskStatus : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobPreparationAndReleaseTaskStatus>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobPreparationAndReleaseTaskStatus> + { + internal BatchJobPreparationAndReleaseTaskStatus() { } + public Azure.Compute.Batch.BatchJobPreparationTaskExecutionInfo JobPreparationTaskExecutionInfo { get { throw null; } } + public Azure.Compute.Batch.BatchJobReleaseTaskExecutionInfo JobReleaseTaskExecutionInfo { get { throw null; } } + public string NodeId { get { throw null; } } + public string NodeUrl { get { throw null; } } + public string PoolId { get { throw null; } } + Azure.Compute.Batch.BatchJobPreparationAndReleaseTaskStatus System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobPreparationAndReleaseTaskStatus>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobPreparationAndReleaseTaskStatus>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchJobPreparationAndReleaseTaskStatus System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobPreparationAndReleaseTaskStatus>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobPreparationAndReleaseTaskStatus>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobPreparationAndReleaseTaskStatus>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class BatchJobPreparationTask : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobPreparationTask>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobPreparationTask> + { + public BatchJobPreparationTask(string commandLine) { } + public string CommandLine { get { throw null; } set { } } + public Azure.Compute.Batch.BatchTaskConstraints Constraints { get { throw null; } set { } } + public Azure.Compute.Batch.BatchTaskContainerSettings ContainerSettings { get { throw null; } set { } } + public System.Collections.Generic.IList<Azure.Compute.Batch.EnvironmentSetting> EnvironmentSettings { get { throw null; } } + public string Id { get { throw null; } set { } } + public bool? RerunOnNodeRebootAfterSuccess { get { throw null; } set { } } + public System.Collections.Generic.IList<Azure.Compute.Batch.ResourceFile> ResourceFiles { get { throw null; } } + public Azure.Compute.Batch.UserIdentity UserIdentity { get { throw null; } set { } } + public bool? WaitForSuccess { get { throw null; } set { } } + Azure.Compute.Batch.BatchJobPreparationTask System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobPreparationTask>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobPreparationTask>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchJobPreparationTask System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobPreparationTask>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobPreparationTask>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobPreparationTask>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + }
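// --- Editor's illustrative sketch (not part of the generated API file). A job
// preparation task runs on each node before any of the job's tasks are scheduled
// there; the command line is a placeholder.
using Azure.Compute.Batch;

var preparationTask = new BatchJobPreparationTask("/bin/sh -c './install-deps.sh'")
{
    WaitForSuccess = true,                 // hold task scheduling until it succeeds
    RerunOnNodeRebootAfterSuccess = false  // do not repeat after a node reboot
};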
+ public partial class BatchJobPreparationTaskExecutionInfo : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobPreparationTaskExecutionInfo>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobPreparationTaskExecutionInfo> + { + internal BatchJobPreparationTaskExecutionInfo() { } + public Azure.Compute.Batch.BatchTaskContainerExecutionInfo ContainerInfo { get { throw null; } } + public System.DateTimeOffset? EndTime { get { throw null; } } + public int? ExitCode { get { throw null; } } + public Azure.Compute.Batch.BatchTaskFailureInfo FailureInfo { get { throw null; } } + public System.DateTimeOffset? LastRetryTime { get { throw null; } } + public Azure.Compute.Batch.BatchTaskExecutionResult? Result { get { throw null; } } + public int RetryCount { get { throw null; } } + public System.DateTimeOffset StartTime { get { throw null; } } + public Azure.Compute.Batch.BatchJobPreparationTaskState State { get { throw null; } } + public string TaskRootDirectory { get { throw null; } } + public string TaskRootDirectoryUrl { get { throw null; } } + Azure.Compute.Batch.BatchJobPreparationTaskExecutionInfo System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobPreparationTaskExecutionInfo>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobPreparationTaskExecutionInfo>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchJobPreparationTaskExecutionInfo System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobPreparationTaskExecutionInfo>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobPreparationTaskExecutionInfo>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobPreparationTaskExecutionInfo>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] + public readonly partial struct BatchJobPreparationTaskState : System.IEquatable<Azure.Compute.Batch.BatchJobPreparationTaskState> + { + private readonly object _dummy; + private readonly int _dummyPrimitive; + public BatchJobPreparationTaskState(string value) { throw null; } + public static Azure.Compute.Batch.BatchJobPreparationTaskState Completed { get { throw null; } } + public static Azure.Compute.Batch.BatchJobPreparationTaskState Running { get { throw null; } } + public bool Equals(Azure.Compute.Batch.BatchJobPreparationTaskState other) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override bool Equals(object obj) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override int GetHashCode() { throw null; } + public static bool operator ==(Azure.Compute.Batch.BatchJobPreparationTaskState left, Azure.Compute.Batch.BatchJobPreparationTaskState right) { throw null; } + public static implicit operator Azure.Compute.Batch.BatchJobPreparationTaskState (string value) { throw null; } + public static bool operator !=(Azure.Compute.Batch.BatchJobPreparationTaskState left, Azure.Compute.Batch.BatchJobPreparationTaskState right) { throw null; } + public override string ToString() { throw null; } + } + public partial class BatchJobReleaseTask : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobReleaseTask>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobReleaseTask> + { + public BatchJobReleaseTask(string commandLine) { } + public string CommandLine { get { throw null; } set { } } + public Azure.Compute.Batch.BatchTaskContainerSettings ContainerSettings { get { throw null; } set { } } + public System.Collections.Generic.IList<Azure.Compute.Batch.EnvironmentSetting> EnvironmentSettings { get { throw null; } } + public string Id { get { throw null; } set { } } + public System.TimeSpan? MaxWallClockTime { get { throw null; } set { } } + public System.Collections.Generic.IList<Azure.Compute.Batch.ResourceFile> ResourceFiles { get { throw null; } } + public System.TimeSpan?
RetentionTime { get { throw null; } set { } } + public Azure.Compute.Batch.UserIdentity UserIdentity { get { throw null; } set { } } + Azure.Compute.Batch.BatchJobReleaseTask System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobReleaseTask>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobReleaseTask>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchJobReleaseTask System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobReleaseTask>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobReleaseTask>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobReleaseTask>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class BatchJobReleaseTaskExecutionInfo : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobReleaseTaskExecutionInfo>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobReleaseTaskExecutionInfo> + { + internal BatchJobReleaseTaskExecutionInfo() { } + public Azure.Compute.Batch.BatchTaskContainerExecutionInfo ContainerInfo { get { throw null; } } + public System.DateTimeOffset? EndTime { get { throw null; } } + public int? ExitCode { get { throw null; } } + public Azure.Compute.Batch.BatchTaskFailureInfo FailureInfo { get { throw null; } } + public Azure.Compute.Batch.BatchTaskExecutionResult? Result { get { throw null; } } + public System.DateTimeOffset StartTime { get { throw null; } } + public Azure.Compute.Batch.BatchJobReleaseTaskState State { get { throw null; } } + public string TaskRootDirectory { get { throw null; } } + public string TaskRootDirectoryUrl { get { throw null; } } + Azure.Compute.Batch.BatchJobReleaseTaskExecutionInfo System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobReleaseTaskExecutionInfo>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobReleaseTaskExecutionInfo>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchJobReleaseTaskExecutionInfo System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobReleaseTaskExecutionInfo>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobReleaseTaskExecutionInfo>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobReleaseTaskExecutionInfo>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + }
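// --- Editor's illustrative sketch (not part of the generated API file). A job
// release task undoes the preparation task's work when the job completes on a
// node; RetentionTime bounds how long its task directory is kept.
using System;
using Azure.Compute.Batch;

var releaseTask = new BatchJobReleaseTask("/bin/sh -c './cleanup.sh'")
{
    MaxWallClockTime = TimeSpan.FromMinutes(15),
    RetentionTime = TimeSpan.FromDays(1)
};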
+ [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] + public readonly partial struct BatchJobReleaseTaskState : System.IEquatable<Azure.Compute.Batch.BatchJobReleaseTaskState> + { + private readonly object _dummy; + private readonly int _dummyPrimitive; + public BatchJobReleaseTaskState(string value) { throw null; } + public static Azure.Compute.Batch.BatchJobReleaseTaskState Completed { get { throw null; } } + public static Azure.Compute.Batch.BatchJobReleaseTaskState Running { get { throw null; } } + public bool Equals(Azure.Compute.Batch.BatchJobReleaseTaskState other) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override bool Equals(object obj) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override int GetHashCode() { throw null; } + public static bool operator ==(Azure.Compute.Batch.BatchJobReleaseTaskState left, Azure.Compute.Batch.BatchJobReleaseTaskState right) { throw null; } + public static implicit operator Azure.Compute.Batch.BatchJobReleaseTaskState (string value) { throw null; } + public static bool operator !=(Azure.Compute.Batch.BatchJobReleaseTaskState left, Azure.Compute.Batch.BatchJobReleaseTaskState right) { throw null; } + public override string ToString() { throw null; } + } + public partial class BatchJobSchedule : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobSchedule>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobSchedule> + { + public BatchJobSchedule(Azure.Compute.Batch.BatchJobSpecification jobSpecification) { } + public System.DateTimeOffset? CreationTime { get { throw null; } } + public string DisplayName { get { throw null; } } + public string ETag { get { throw null; } } + public Azure.Compute.Batch.BatchJobScheduleExecutionInfo ExecutionInfo { get { throw null; } } + public string Id { get { throw null; } } + public Azure.Compute.Batch.BatchJobSpecification JobSpecification { get { throw null; } set { } } + public System.DateTimeOffset? LastModified { get { throw null; } } + public System.Collections.Generic.IList<Azure.Compute.Batch.MetadataItem> Metadata { get { throw null; } } + public Azure.Compute.Batch.BatchJobScheduleState? PreviousState { get { throw null; } } + public System.DateTimeOffset? PreviousStateTransitionTime { get { throw null; } } + public Azure.Compute.Batch.BatchJobScheduleConfiguration Schedule { get { throw null; } set { } } + public Azure.Compute.Batch.BatchJobScheduleState? State { get { throw null; } } + public System.DateTimeOffset? StateTransitionTime { get { throw null; } } + public Azure.Compute.Batch.BatchJobScheduleStatistics Stats { get { throw null; } } + public string Url { get { throw null; } } + Azure.Compute.Batch.BatchJobSchedule System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobSchedule>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobSchedule>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchJobSchedule System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobSchedule>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobSchedule>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobSchedule>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class BatchJobScheduleConfiguration : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobScheduleConfiguration>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobScheduleConfiguration> + { + public BatchJobScheduleConfiguration() { } + public System.DateTimeOffset? DoNotRunAfter { get { throw null; } set { } } + public System.DateTimeOffset? DoNotRunUntil { get { throw null; } set { } } + public System.TimeSpan? RecurrenceInterval { get { throw null; } set { } } + public System.TimeSpan?
StartWindow { get { throw null; } set { } } + Azure.Compute.Batch.BatchJobScheduleConfiguration System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobScheduleConfiguration>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobScheduleConfiguration>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchJobScheduleConfiguration System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobScheduleConfiguration>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobScheduleConfiguration>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobScheduleConfiguration>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class BatchJobScheduleCreateContent : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobScheduleCreateContent>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobScheduleCreateContent> + { + public BatchJobScheduleCreateContent(string id, Azure.Compute.Batch.BatchJobScheduleConfiguration schedule, Azure.Compute.Batch.BatchJobSpecification jobSpecification) { } + public string DisplayName { get { throw null; } set { } } + public string Id { get { throw null; } } + public Azure.Compute.Batch.BatchJobSpecification JobSpecification { get { throw null; } } + public System.Collections.Generic.IList<Azure.Compute.Batch.MetadataItem> Metadata { get { throw null; } } + public Azure.Compute.Batch.BatchJobScheduleConfiguration Schedule { get { throw null; } } + Azure.Compute.Batch.BatchJobScheduleCreateContent System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobScheduleCreateContent>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobScheduleCreateContent>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchJobScheduleCreateContent System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobScheduleCreateContent>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobScheduleCreateContent>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobScheduleCreateContent>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + }
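// --- Editor's illustrative sketch (not part of the generated API file). A job
// schedule pairs a recurrence window with the job specification each run uses;
// BatchJobSpecification is declared further down in this file, and
// BatchPoolInfo.PoolId is an assumption about a type declared elsewhere.
using System;
using Azure.Compute.Batch;

var schedule = new BatchJobScheduleConfiguration
{
    RecurrenceInterval = TimeSpan.FromDays(1),        // one job instance per day
    DoNotRunUntil = DateTimeOffset.UtcNow.AddHours(1) // first run no earlier than this
};
var jobSpecification = new BatchJobSpecification(new BatchPoolInfo { PoolId = "nightly-pool" });
var scheduleContent = new BatchJobScheduleCreateContent("nightly-schedule", schedule, jobSpecification);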
+ public partial class BatchJobScheduleExecutionInfo : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobScheduleExecutionInfo>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobScheduleExecutionInfo> + { + public BatchJobScheduleExecutionInfo() { } + public System.DateTimeOffset? EndTime { get { throw null; } set { } } + public System.DateTimeOffset? NextRunTime { get { throw null; } set { } } + public Azure.Compute.Batch.RecentBatchJob RecentJob { get { throw null; } set { } } + Azure.Compute.Batch.BatchJobScheduleExecutionInfo System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobScheduleExecutionInfo>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobScheduleExecutionInfo>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchJobScheduleExecutionInfo System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobScheduleExecutionInfo>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobScheduleExecutionInfo>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobScheduleExecutionInfo>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] + public readonly partial struct BatchJobScheduleState : System.IEquatable<Azure.Compute.Batch.BatchJobScheduleState> + { + private readonly object _dummy; + private readonly int _dummyPrimitive; + public BatchJobScheduleState(string value) { throw null; } + public static Azure.Compute.Batch.BatchJobScheduleState Active { get { throw null; } } + public static Azure.Compute.Batch.BatchJobScheduleState Completed { get { throw null; } } + public static Azure.Compute.Batch.BatchJobScheduleState Deleting { get { throw null; } } + public static Azure.Compute.Batch.BatchJobScheduleState Disabled { get { throw null; } } + public static Azure.Compute.Batch.BatchJobScheduleState Terminating { get { throw null; } } + public bool Equals(Azure.Compute.Batch.BatchJobScheduleState other) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override bool Equals(object obj) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override int GetHashCode() { throw null; } + public static bool operator ==(Azure.Compute.Batch.BatchJobScheduleState left, Azure.Compute.Batch.BatchJobScheduleState right) { throw null; } + public static implicit operator Azure.Compute.Batch.BatchJobScheduleState (string value) { throw null; } + public static bool operator !=(Azure.Compute.Batch.BatchJobScheduleState left, Azure.Compute.Batch.BatchJobScheduleState right) { throw null; } + public override string ToString() { throw null; } + } + public partial class BatchJobScheduleStatistics : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobScheduleStatistics>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobScheduleStatistics> + { + public BatchJobScheduleStatistics(string url, System.DateTimeOffset startTime, System.DateTimeOffset lastUpdateTime, System.TimeSpan userCpuTime, System.TimeSpan kernelCpuTime, System.TimeSpan wallClockTime, long readIOps, long writeIOps, float readIOGiB, float writeIOGiB, long numSucceededTasks, long numFailedTasks, long numTaskRetries, System.TimeSpan waitTime) { } + public System.TimeSpan KernelCpuTime { get { throw null; } set { } } + public System.DateTimeOffset LastUpdateTime { get { throw null; } set { } } + public long NumFailedTasks { get { throw null; } set { } } + public long NumSucceededTasks { get { throw null; } set { } } + public long NumTaskRetries { get { throw null; } set { } } + public float
ReadIOGiB { get { throw null; } set { } } + public long ReadIOps { get { throw null; } set { } } + public System.DateTimeOffset StartTime { get { throw null; } set { } } + public string Url { get { throw null; } set { } } + public System.TimeSpan UserCpuTime { get { throw null; } set { } } + public System.TimeSpan WaitTime { get { throw null; } set { } } + public System.TimeSpan WallClockTime { get { throw null; } set { } } + public float WriteIOGiB { get { throw null; } set { } } + public long WriteIOps { get { throw null; } set { } } + Azure.Compute.Batch.BatchJobScheduleStatistics System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobScheduleStatistics>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobScheduleStatistics>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchJobScheduleStatistics System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobScheduleStatistics>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobScheduleStatistics>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobScheduleStatistics>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class BatchJobScheduleUpdateContent : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobScheduleUpdateContent>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobScheduleUpdateContent> + { + public BatchJobScheduleUpdateContent() { } + public Azure.Compute.Batch.BatchJobSpecification JobSpecification { get { throw null; } set { } } + public System.Collections.Generic.IList<Azure.Compute.Batch.MetadataItem> Metadata { get { throw null; } } + public Azure.Compute.Batch.BatchJobScheduleConfiguration Schedule { get { throw null; } set { } } + Azure.Compute.Batch.BatchJobScheduleUpdateContent System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobScheduleUpdateContent>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobScheduleUpdateContent>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchJobScheduleUpdateContent System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobScheduleUpdateContent>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobScheduleUpdateContent>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobScheduleUpdateContent>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class BatchJobSchedulingError : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobSchedulingError>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobSchedulingError> + { + public BatchJobSchedulingError(Azure.Compute.Batch.ErrorCategory category) { } + public Azure.Compute.Batch.ErrorCategory Category { get { throw null; } set { } } + public string Code { get { throw null; } set { } } + public System.Collections.Generic.IList<Azure.Compute.Batch.NameValuePair> Details { get { throw null; } } + public string Message { get { throw null; } set { } } + Azure.Compute.Batch.BatchJobSchedulingError System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobSchedulingError>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void
System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobSchedulingError>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchJobSchedulingError System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobSchedulingError>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobSchedulingError>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobSchedulingError>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class BatchJobSpecification : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobSpecification>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobSpecification> + { + public BatchJobSpecification(Azure.Compute.Batch.BatchPoolInfo poolInfo) { } + public bool? AllowTaskPreemption { get { throw null; } set { } } + public System.Collections.Generic.IList<Azure.Compute.Batch.EnvironmentSetting> CommonEnvironmentSettings { get { throw null; } } + public Azure.Compute.Batch.BatchJobConstraints Constraints { get { throw null; } set { } } + public string DisplayName { get { throw null; } set { } } + public Azure.Compute.Batch.BatchJobManagerTask JobManagerTask { get { throw null; } set { } } + public Azure.Compute.Batch.BatchJobPreparationTask JobPreparationTask { get { throw null; } set { } } + public Azure.Compute.Batch.BatchJobReleaseTask JobReleaseTask { get { throw null; } set { } } + public int? MaxParallelTasks { get { throw null; } set { } } + public System.Collections.Generic.IList<Azure.Compute.Batch.MetadataItem> Metadata { get { throw null; } } + public Azure.Compute.Batch.BatchJobNetworkConfiguration NetworkConfiguration { get { throw null; } set { } } + public Azure.Compute.Batch.OnAllBatchTasksComplete? OnAllTasksComplete { get { throw null; } set { } } + public Azure.Compute.Batch.OnBatchTaskFailure? OnTaskFailure { get { throw null; } set { } } + public Azure.Compute.Batch.BatchPoolInfo PoolInfo { get { throw null; } set { } } + public int? Priority { get { throw null; } set { } } + public bool?
UsesTaskDependencies { get { throw null; } set { } } + Azure.Compute.Batch.BatchJobSpecification System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobSpecification>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobSpecification>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchJobSpecification System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobSpecification>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobSpecification>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobSpecification>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] + public readonly partial struct BatchJobState : System.IEquatable<Azure.Compute.Batch.BatchJobState> + { + private readonly object _dummy; + private readonly int _dummyPrimitive; + public BatchJobState(string value) { throw null; } + public static Azure.Compute.Batch.BatchJobState Active { get { throw null; } } + public static Azure.Compute.Batch.BatchJobState Completed { get { throw null; } } + public static Azure.Compute.Batch.BatchJobState Deleting { get { throw null; } } + public static Azure.Compute.Batch.BatchJobState Disabled { get { throw null; } } + public static Azure.Compute.Batch.BatchJobState Disabling { get { throw null; } } + public static Azure.Compute.Batch.BatchJobState Enabling { get { throw null; } } + public static Azure.Compute.Batch.BatchJobState Terminating { get { throw null; } } + public bool Equals(Azure.Compute.Batch.BatchJobState other) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override bool Equals(object obj) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override int GetHashCode() { throw null; } + public static bool operator ==(Azure.Compute.Batch.BatchJobState left, Azure.Compute.Batch.BatchJobState right) { throw null; } + public static implicit operator Azure.Compute.Batch.BatchJobState (string value) { throw null; } + public static bool operator !=(Azure.Compute.Batch.BatchJobState left, Azure.Compute.Batch.BatchJobState right) { throw null; } + public override string ToString() { throw null; } + } + public partial class BatchJobStatistics : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobStatistics>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobStatistics> + { + public BatchJobStatistics(string url, System.DateTimeOffset startTime, System.DateTimeOffset lastUpdateTime, System.TimeSpan userCpuTime, System.TimeSpan kernelCpuTime, System.TimeSpan wallClockTime, long readIOps, long writeIOps, float readIOGiB, float writeIOGiB, long numSucceededTasks, long numFailedTasks, long numTaskRetries, System.TimeSpan waitTime) { } + public System.TimeSpan KernelCpuTime { get { throw null; } set { } } + public System.DateTimeOffset LastUpdateTime { get { throw null; } set { } } + public long NumFailedTasks { get { throw null; } set { } } + public long NumSucceededTasks { get { throw null; } set { } } + public long NumTaskRetries { get { throw null; } set { } } + public float ReadIOGiB { get { throw null; } set { } } + public
long ReadIOps { get { throw null; } set { } } + public System.DateTimeOffset StartTime { get { throw null; } set { } } + public string Url { get { throw null; } set { } } + public System.TimeSpan UserCpuTime { get { throw null; } set { } } + public System.TimeSpan WaitTime { get { throw null; } set { } } + public System.TimeSpan WallClockTime { get { throw null; } set { } } + public float WriteIOGiB { get { throw null; } set { } } + public long WriteIOps { get { throw null; } set { } } + Azure.Compute.Batch.BatchJobStatistics System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobStatistics>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobStatistics>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchJobStatistics System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobStatistics>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobStatistics>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobStatistics>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class BatchJobTerminateContent : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobTerminateContent>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobTerminateContent> + { + public BatchJobTerminateContent() { } + public string TerminationReason { get { throw null; } set { } } + Azure.Compute.Batch.BatchJobTerminateContent System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobTerminateContent>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobTerminateContent>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchJobTerminateContent System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobTerminateContent>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobTerminateContent>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobTerminateContent>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class BatchJobUpdateContent : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobUpdateContent>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobUpdateContent> + { + public BatchJobUpdateContent() { } + public bool? AllowTaskPreemption { get { throw null; } set { } } + public Azure.Compute.Batch.BatchJobConstraints Constraints { get { throw null; } set { } } + public int? MaxParallelTasks { get { throw null; } set { } } + public System.Collections.Generic.IList<Azure.Compute.Batch.MetadataItem> Metadata { get { throw null; } } + public Azure.Compute.Batch.OnAllBatchTasksComplete? OnAllTasksComplete { get { throw null; } set { } } + public Azure.Compute.Batch.BatchPoolInfo PoolInfo { get { throw null; } set { } } + public int?
Priority { get { throw null; } set { } } + Azure.Compute.Batch.BatchJobUpdateContent System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobUpdateContent>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchJobUpdateContent>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchJobUpdateContent System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobUpdateContent>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobUpdateContent>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchJobUpdateContent>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + }
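// --- Editor's illustrative sketch (not part of the generated API file).
// BatchJobUpdateContent carries only the fields to patch on an existing job, and
// BatchJobTerminateContent optionally records why a job was ended.
using Azure.Compute.Batch;

var update = new BatchJobUpdateContent
{
    Priority = 500,      // raise scheduling priority
    MaxParallelTasks = 8 // cap concurrently running tasks
};
var terminate = new BatchJobTerminateContent
{
    TerminationReason = "Superseded by a re-run"
};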
+ public partial class BatchNode : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNode>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNode> + { + internal BatchNode() { } + public string AffinityId { get { throw null; } } + public System.DateTimeOffset? AllocationTime { get { throw null; } } + public Azure.Compute.Batch.BatchNodeEndpointConfiguration EndpointConfiguration { get { throw null; } } + public System.Collections.Generic.IReadOnlyList<Azure.Compute.Batch.BatchNodeError> Errors { get { throw null; } } + public string Id { get { throw null; } } + public string IpAddress { get { throw null; } } + public bool? IsDedicated { get { throw null; } } + public System.DateTimeOffset? LastBootTime { get { throw null; } } + public Azure.Compute.Batch.BatchNodeAgentInfo NodeAgentInfo { get { throw null; } } + public System.Collections.Generic.IReadOnlyList<Azure.Compute.Batch.BatchTaskInfo> RecentTasks { get { throw null; } } + public int? RunningTasksCount { get { throw null; } } + public int? RunningTaskSlotsCount { get { throw null; } } + public Azure.Compute.Batch.SchedulingState? SchedulingState { get { throw null; } } + public Azure.Compute.Batch.BatchStartTask StartTask { get { throw null; } } + public Azure.Compute.Batch.BatchStartTaskInfo StartTaskInfo { get { throw null; } } + public Azure.Compute.Batch.BatchNodeState? State { get { throw null; } } + public System.DateTimeOffset? StateTransitionTime { get { throw null; } } + public int? TotalTasksRun { get { throw null; } } + public int? TotalTasksSucceeded { get { throw null; } } + public string Url { get { throw null; } } + public Azure.Compute.Batch.VirtualMachineInfo VirtualMachineInfo { get { throw null; } } + public string VmSize { get { throw null; } } + Azure.Compute.Batch.BatchNode System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNode>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNode>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchNode System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNode>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNode>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNode>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class BatchNodeAgentInfo : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeAgentInfo>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeAgentInfo> + { + internal BatchNodeAgentInfo() { } + public System.DateTimeOffset LastUpdateTime { get { throw null; } } + public string Version { get { throw null; } } + Azure.Compute.Batch.BatchNodeAgentInfo System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeAgentInfo>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeAgentInfo>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchNodeAgentInfo System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeAgentInfo>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeAgentInfo>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeAgentInfo>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] + public readonly partial struct BatchNodeCommunicationMode : System.IEquatable<Azure.Compute.Batch.BatchNodeCommunicationMode> + { + private readonly object _dummy; + private readonly int _dummyPrimitive; + public BatchNodeCommunicationMode(string value) { throw null; } + public static Azure.Compute.Batch.BatchNodeCommunicationMode Classic { get { throw null; } } + public static Azure.Compute.Batch.BatchNodeCommunicationMode Default { get { throw null; } } + public static Azure.Compute.Batch.BatchNodeCommunicationMode Simplified { get { throw null; } } + public bool Equals(Azure.Compute.Batch.BatchNodeCommunicationMode other) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override bool Equals(object obj) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override int GetHashCode() { throw null; } + public static bool operator ==(Azure.Compute.Batch.BatchNodeCommunicationMode left, Azure.Compute.Batch.BatchNodeCommunicationMode right) { throw null; } + public static implicit operator Azure.Compute.Batch.BatchNodeCommunicationMode (string
value) { throw null; } + public static bool operator !=(Azure.Compute.Batch.BatchNodeCommunicationMode left, Azure.Compute.Batch.BatchNodeCommunicationMode right) { throw null; } + public override string ToString() { throw null; } + } + public partial class BatchNodeCounts : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeCounts>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeCounts> + { + internal BatchNodeCounts() { } + public int Creating { get { throw null; } } + public int Idle { get { throw null; } } + public int LeavingPool { get { throw null; } } + public int Offline { get { throw null; } } + public int Preempted { get { throw null; } } + public int Rebooting { get { throw null; } } + public int Reimaging { get { throw null; } } + public int Running { get { throw null; } } + public int Starting { get { throw null; } } + public int StartTaskFailed { get { throw null; } } + public int Total { get { throw null; } } + public int Unknown { get { throw null; } } + public int Unusable { get { throw null; } } + public int UpgradingOs { get { throw null; } } + public int WaitingForStartTask { get { throw null; } } + Azure.Compute.Batch.BatchNodeCounts System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeCounts>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeCounts>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchNodeCounts System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeCounts>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeCounts>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeCounts>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] + public readonly partial struct BatchNodeDeallocationOption : System.IEquatable<Azure.Compute.Batch.BatchNodeDeallocationOption> + { + private readonly object _dummy; + private readonly int _dummyPrimitive; + public BatchNodeDeallocationOption(string value) { throw null; } + public static Azure.Compute.Batch.BatchNodeDeallocationOption Requeue { get { throw null; } } + public static Azure.Compute.Batch.BatchNodeDeallocationOption RetainedData { get { throw null; } } + public static Azure.Compute.Batch.BatchNodeDeallocationOption TaskCompletion { get { throw null; } } + public static Azure.Compute.Batch.BatchNodeDeallocationOption Terminate { get { throw null; } } + public bool Equals(Azure.Compute.Batch.BatchNodeDeallocationOption other) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override bool Equals(object obj) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override int GetHashCode() { throw null; } + public static bool operator ==(Azure.Compute.Batch.BatchNodeDeallocationOption left, Azure.Compute.Batch.BatchNodeDeallocationOption right) { throw null; } + public static implicit operator Azure.Compute.Batch.BatchNodeDeallocationOption (string value) { throw null; } + public static bool operator !=(Azure.Compute.Batch.BatchNodeDeallocationOption left, Azure.Compute.Batch.BatchNodeDeallocationOption right) { throw
null; } + public override string ToString() { throw null; } + } + public partial class BatchNodeDisableSchedulingContent : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeDisableSchedulingContent>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeDisableSchedulingContent> + { + public BatchNodeDisableSchedulingContent() { } + public Azure.Compute.Batch.BatchNodeDisableSchedulingOption? NodeDisableSchedulingOption { get { throw null; } set { } } + Azure.Compute.Batch.BatchNodeDisableSchedulingContent System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeDisableSchedulingContent>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeDisableSchedulingContent>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchNodeDisableSchedulingContent System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeDisableSchedulingContent>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeDisableSchedulingContent>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeDisableSchedulingContent>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] + public readonly partial struct BatchNodeDisableSchedulingOption : System.IEquatable<Azure.Compute.Batch.BatchNodeDisableSchedulingOption> + { + private readonly object _dummy; + private readonly int _dummyPrimitive; + public BatchNodeDisableSchedulingOption(string value) { throw null; } + public static Azure.Compute.Batch.BatchNodeDisableSchedulingOption Requeue { get { throw null; } } + public static Azure.Compute.Batch.BatchNodeDisableSchedulingOption TaskCompletion { get { throw null; } } + public static Azure.Compute.Batch.BatchNodeDisableSchedulingOption Terminate { get { throw null; } } + public bool Equals(Azure.Compute.Batch.BatchNodeDisableSchedulingOption other) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override bool Equals(object obj) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override int GetHashCode() { throw null; } + public static bool operator ==(Azure.Compute.Batch.BatchNodeDisableSchedulingOption left, Azure.Compute.Batch.BatchNodeDisableSchedulingOption right) { throw null; } + public static implicit operator Azure.Compute.Batch.BatchNodeDisableSchedulingOption (string value) { throw null; } + public static bool operator !=(Azure.Compute.Batch.BatchNodeDisableSchedulingOption left, Azure.Compute.Batch.BatchNodeDisableSchedulingOption right) { throw null; } + public override string ToString() { throw null; } + }
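// --- Editor's illustrative sketch (not part of the generated API file). Choosing
// what happens to already-running tasks when scheduling is turned off on a node;
// Requeue is one of the options declared above.
using Azure.Compute.Batch;

var disableScheduling = new BatchNodeDisableSchedulingContent
{
    NodeDisableSchedulingOption = BatchNodeDisableSchedulingOption.Requeue
};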
+ public partial class BatchNodeEndpointConfiguration : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeEndpointConfiguration>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeEndpointConfiguration> + { + internal BatchNodeEndpointConfiguration() { } + public System.Collections.Generic.IReadOnlyList<Azure.Compute.Batch.InboundEndpoint> InboundEndpoints { get { throw null; } } + Azure.Compute.Batch.BatchNodeEndpointConfiguration System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeEndpointConfiguration>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeEndpointConfiguration>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchNodeEndpointConfiguration System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeEndpointConfiguration>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeEndpointConfiguration>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeEndpointConfiguration>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class BatchNodeError : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeError>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeError> + { + internal BatchNodeError() { } + public string Code { get { throw null; } } + public System.Collections.Generic.IReadOnlyList<Azure.Compute.Batch.NameValuePair> ErrorDetails { get { throw null; } } + public string Message { get { throw null; } } + Azure.Compute.Batch.BatchNodeError System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeError>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeError>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchNodeError System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeError>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeError>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeError>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class BatchNodeFile : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeFile>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeFile> + { + internal BatchNodeFile() { } + public bool?
IsDirectory { get { throw null; } } + public string Name { get { throw null; } } + public Azure.Compute.Batch.FileProperties Properties { get { throw null; } } + public string Url { get { throw null; } } + Azure.Compute.Batch.BatchNodeFile System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeFile>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeFile>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchNodeFile System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeFile>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeFile>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeFile>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] + public readonly partial struct BatchNodeFillType : System.IEquatable<Azure.Compute.Batch.BatchNodeFillType> + { + private readonly object _dummy; + private readonly int _dummyPrimitive; + public BatchNodeFillType(string value) { throw null; } + public static Azure.Compute.Batch.BatchNodeFillType Pack { get { throw null; } } + public static Azure.Compute.Batch.BatchNodeFillType Spread { get { throw null; } } + public bool Equals(Azure.Compute.Batch.BatchNodeFillType other) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override bool Equals(object obj) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override int GetHashCode() { throw null; } + public static bool operator ==(Azure.Compute.Batch.BatchNodeFillType left, Azure.Compute.Batch.BatchNodeFillType right) { throw null; } + public static implicit operator Azure.Compute.Batch.BatchNodeFillType (string value) { throw null; } + public static bool operator !=(Azure.Compute.Batch.BatchNodeFillType left, Azure.Compute.Batch.BatchNodeFillType right) { throw null; } + public override string ToString() { throw null; } + } + public partial class BatchNodeIdentityReference : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeIdentityReference>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeIdentityReference> + { + public BatchNodeIdentityReference() { } + public string ResourceId { get { throw null; } set { } } + Azure.Compute.Batch.BatchNodeIdentityReference System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeIdentityReference>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeIdentityReference>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchNodeIdentityReference System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeIdentityReference>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeIdentityReference>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeIdentityReference>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + }
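// --- Editor's illustrative sketch (not part of the generated API file). Reading a
// BatchNode; its constructor is internal, so an instance would come from a client
// call such as BatchClient.GetNode(poolId, nodeId), which is an assumption about
// the service client declared elsewhere in this diff.
using System;
using Azure.Compute.Batch;

static void DescribeNode(BatchNode node)
{
    Console.WriteLine($"{node.Id} ({node.VmSize}) is {node.State}");
    Console.WriteLine($"dedicated: {node.IsDedicated}, running tasks: {node.RunningTasksCount}");
    foreach (BatchNodeError error in node.Errors ?? Array.Empty<BatchNodeError>())
    {
        Console.WriteLine($"  node error {error.Code}: {error.Message}");
    }
}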
+    public partial class BatchNodeInfo : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeInfo>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeInfo>
+    {
+        public BatchNodeInfo() { }
+        public string AffinityId { get { throw null; } set { } }
+        public string NodeId { get { throw null; } set { } }
+        public string NodeUrl { get { throw null; } set { } }
+        public string PoolId { get { throw null; } set { } }
+        public string TaskRootDirectory { get { throw null; } set { } }
+        public string TaskRootDirectoryUrl { get { throw null; } set { } }
+        Azure.Compute.Batch.BatchNodeInfo System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeInfo>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeInfo>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.BatchNodeInfo System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeInfo>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeInfo>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeInfo>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    public partial class BatchNodePlacementConfiguration : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodePlacementConfiguration>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodePlacementConfiguration>
+    {
+        public BatchNodePlacementConfiguration() { }
+        public Azure.Compute.Batch.BatchNodePlacementPolicyType? Policy { get { throw null; } set { } }
+        Azure.Compute.Batch.BatchNodePlacementConfiguration System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodePlacementConfiguration>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodePlacementConfiguration>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.BatchNodePlacementConfiguration System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodePlacementConfiguration>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodePlacementConfiguration>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodePlacementConfiguration>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)]
+    public readonly partial struct BatchNodePlacementPolicyType : System.IEquatable<Azure.Compute.Batch.BatchNodePlacementPolicyType>
+    {
+        private readonly object _dummy;
+        private readonly int _dummyPrimitive;
+        public BatchNodePlacementPolicyType(string value) { throw null; }
+        public static Azure.Compute.Batch.BatchNodePlacementPolicyType Regional { get { throw null; } }
+        public static Azure.Compute.Batch.BatchNodePlacementPolicyType Zonal { get { throw null; } }
+        public bool Equals(Azure.Compute.Batch.BatchNodePlacementPolicyType other) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override bool Equals(object obj) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override int GetHashCode() { throw null; }
+        public static bool operator ==(Azure.Compute.Batch.BatchNodePlacementPolicyType left, Azure.Compute.Batch.BatchNodePlacementPolicyType right) { throw null; }
+        public static implicit operator Azure.Compute.Batch.BatchNodePlacementPolicyType (string value) { throw null; }
+        public static bool operator !=(Azure.Compute.Batch.BatchNodePlacementPolicyType left, Azure.Compute.Batch.BatchNodePlacementPolicyType right) { throw null; }
+        public override string ToString() { throw null; }
+    }
+    public partial class BatchNodeRebootContent : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeRebootContent>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeRebootContent>
+    {
+        public BatchNodeRebootContent() { }
+        public Azure.Compute.Batch.BatchNodeRebootOption? NodeRebootOption { get { throw null; } set { } }
+        Azure.Compute.Batch.BatchNodeRebootContent System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeRebootContent>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeRebootContent>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.BatchNodeRebootContent System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeRebootContent>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeRebootContent>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeRebootContent>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)]
+    public readonly partial struct BatchNodeRebootOption : System.IEquatable<Azure.Compute.Batch.BatchNodeRebootOption>
+    {
+        private readonly object _dummy;
+        private readonly int _dummyPrimitive;
+        public BatchNodeRebootOption(string value) { throw null; }
+        public static Azure.Compute.Batch.BatchNodeRebootOption Requeue { get { throw null; } }
+        public static Azure.Compute.Batch.BatchNodeRebootOption RetainedData { get { throw null; } }
+        public static Azure.Compute.Batch.BatchNodeRebootOption TaskCompletion { get { throw null; } }
+        public static Azure.Compute.Batch.BatchNodeRebootOption Terminate { get { throw null; } }
+        public bool Equals(Azure.Compute.Batch.BatchNodeRebootOption other) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override bool Equals(object obj) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override int GetHashCode() { throw null; }
+        public static bool operator ==(Azure.Compute.Batch.BatchNodeRebootOption left, Azure.Compute.Batch.BatchNodeRebootOption right) { throw null; }
+        public static implicit operator Azure.Compute.Batch.BatchNodeRebootOption (string value) { throw null; }
+        public static bool operator !=(Azure.Compute.Batch.BatchNodeRebootOption left, Azure.Compute.Batch.BatchNodeRebootOption right) { throw null; }
+        public override string ToString() { throw null; }
+    }
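+    // Illustrative sketch of the reboot content/option pair above; the BatchClient.RebootNode call
+    // shown here is assumed for context and is not part of this excerpt:
+    //   var reboot = new BatchNodeRebootContent { NodeRebootOption = BatchNodeRebootOption.TaskCompletion };
+    //   batchClient.RebootNode("<poolId>", "<nodeId>", reboot);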
+    public partial class BatchNodeRemoteLoginSettings : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeRemoteLoginSettings>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeRemoteLoginSettings>
+    {
+        internal BatchNodeRemoteLoginSettings() { }
+        public string RemoteLoginIpAddress { get { throw null; } }
+        public int RemoteLoginPort { get { throw null; } }
+        Azure.Compute.Batch.BatchNodeRemoteLoginSettings System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeRemoteLoginSettings>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeRemoteLoginSettings>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.BatchNodeRemoteLoginSettings System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeRemoteLoginSettings>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeRemoteLoginSettings>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeRemoteLoginSettings>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    public partial class BatchNodeRemoveContent : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeRemoveContent>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeRemoveContent>
+    {
+        public BatchNodeRemoveContent(System.Collections.Generic.IEnumerable<string> nodeList) { }
+        public Azure.Compute.Batch.BatchNodeDeallocationOption? NodeDeallocationOption { get { throw null; } set { } }
+        public System.Collections.Generic.IList<string> NodeList { get { throw null; } }
+        public System.TimeSpan? ResizeTimeout { get { throw null; } set { } }
+        Azure.Compute.Batch.BatchNodeRemoveContent System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeRemoveContent>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeRemoveContent>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.BatchNodeRemoveContent System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeRemoveContent>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeRemoveContent>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeRemoveContent>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
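+    // Illustrative sketch of BatchNodeRemoveContent; the BatchClient.RemoveNodes call is assumed:
+    //   var remove = new BatchNodeRemoveContent(new[] { "<nodeId1>", "<nodeId2>" })
+    //   {
+    //       NodeDeallocationOption = BatchNodeDeallocationOption.TaskCompletion,
+    //       ResizeTimeout = TimeSpan.FromMinutes(10),
+    //   };
+    //   batchClient.RemoveNodes("<poolId>", remove);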
+    [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)]
+    public readonly partial struct BatchNodeState : System.IEquatable<Azure.Compute.Batch.BatchNodeState>
+    {
+        private readonly object _dummy;
+        private readonly int _dummyPrimitive;
+        public BatchNodeState(string value) { throw null; }
+        public static Azure.Compute.Batch.BatchNodeState Creating { get { throw null; } }
+        public static Azure.Compute.Batch.BatchNodeState Idle { get { throw null; } }
+        public static Azure.Compute.Batch.BatchNodeState LeavingPool { get { throw null; } }
+        public static Azure.Compute.Batch.BatchNodeState Offline { get { throw null; } }
+        public static Azure.Compute.Batch.BatchNodeState Preempted { get { throw null; } }
+        public static Azure.Compute.Batch.BatchNodeState Rebooting { get { throw null; } }
+        public static Azure.Compute.Batch.BatchNodeState Reimaging { get { throw null; } }
+        public static Azure.Compute.Batch.BatchNodeState Running { get { throw null; } }
+        public static Azure.Compute.Batch.BatchNodeState Starting { get { throw null; } }
+        public static Azure.Compute.Batch.BatchNodeState StartTaskFailed { get { throw null; } }
+        public static Azure.Compute.Batch.BatchNodeState Unknown { get { throw null; } }
+        public static Azure.Compute.Batch.BatchNodeState Unusable { get { throw null; } }
+        public static Azure.Compute.Batch.BatchNodeState UpgradingOS { get { throw null; } }
+        public static Azure.Compute.Batch.BatchNodeState WaitingForStartTask { get { throw null; } }
+        public bool Equals(Azure.Compute.Batch.BatchNodeState other) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override bool Equals(object obj) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override int GetHashCode() { throw null; }
+        public static bool operator ==(Azure.Compute.Batch.BatchNodeState left, Azure.Compute.Batch.BatchNodeState right) { throw null; }
+        public static implicit operator Azure.Compute.Batch.BatchNodeState (string value) { throw null; }
+        public static bool operator !=(Azure.Compute.Batch.BatchNodeState left, Azure.Compute.Batch.BatchNodeState right) { throw null; }
+        public override string ToString() { throw null; }
+    }
+    public partial class BatchNodeUserCreateContent : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeUserCreateContent>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeUserCreateContent>
+    {
+        public BatchNodeUserCreateContent(string name) { }
+        public System.DateTimeOffset? ExpiryTime { get { throw null; } set { } }
+        public bool? IsAdmin { get { throw null; } set { } }
+        public string Name { get { throw null; } }
+        public string Password { get { throw null; } set { } }
+        public string SshPublicKey { get { throw null; } set { } }
+        Azure.Compute.Batch.BatchNodeUserCreateContent System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeUserCreateContent>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeUserCreateContent>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.BatchNodeUserCreateContent System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeUserCreateContent>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeUserCreateContent>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeUserCreateContent>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
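+    // Illustrative sketch of BatchNodeUserCreateContent; the BatchClient.CreateNodeUser call is assumed:
+    //   var user = new BatchNodeUserCreateContent("<userName>")
+    //   {
+    //       IsAdmin = false,
+    //       ExpiryTime = DateTimeOffset.UtcNow.AddDays(1),
+    //       SshPublicKey = "<opensshPublicKey>",
+    //   };
+    //   batchClient.CreateNodeUser("<poolId>", "<nodeId>", user);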
+    public partial class BatchNodeUserUpdateContent : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeUserUpdateContent>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeUserUpdateContent>
+    {
+        public BatchNodeUserUpdateContent() { }
+        public System.DateTimeOffset? ExpiryTime { get { throw null; } set { } }
+        public string Password { get { throw null; } set { } }
+        public string SshPublicKey { get { throw null; } set { } }
+        Azure.Compute.Batch.BatchNodeUserUpdateContent System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeUserUpdateContent>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeUserUpdateContent>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.BatchNodeUserUpdateContent System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeUserUpdateContent>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeUserUpdateContent>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeUserUpdateContent>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    public partial class BatchNodeVMExtension : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeVMExtension>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeVMExtension>
+    {
+        internal BatchNodeVMExtension() { }
+        public Azure.Compute.Batch.VMExtensionInstanceView InstanceView { get { throw null; } }
+        public string ProvisioningState { get { throw null; } }
+        public Azure.Compute.Batch.VMExtension VmExtension { get { throw null; } }
+        Azure.Compute.Batch.BatchNodeVMExtension System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeVMExtension>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchNodeVMExtension>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.BatchNodeVMExtension System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeVMExtension>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeVMExtension>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchNodeVMExtension>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    public partial class BatchPool : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPool>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPool>
+    {
+        internal BatchPool() { }
+        public Azure.Compute.Batch.AllocationState? AllocationState { get { throw null; } }
+        public System.DateTimeOffset? AllocationStateTransitionTime { get { throw null; } }
+        public System.Collections.Generic.IReadOnlyList<Azure.Compute.Batch.BatchApplicationPackageReference> ApplicationPackageReferences { get { throw null; } }
+        public System.TimeSpan? AutoScaleEvaluationInterval { get { throw null; } }
+        public string AutoScaleFormula { get { throw null; } }
+        public Azure.Compute.Batch.AutoScaleRun AutoScaleRun { get { throw null; } }
+        public System.DateTimeOffset? CreationTime { get { throw null; } }
+        public int? CurrentDedicatedNodes { get { throw null; } }
+        public int? CurrentLowPriorityNodes { get { throw null; } }
+        public Azure.Compute.Batch.BatchNodeCommunicationMode? CurrentNodeCommunicationMode { get { throw null; } }
+        public string DisplayName { get { throw null; } }
+        public bool? EnableAutoScale { get { throw null; } }
+        public bool? EnableInterNodeCommunication { get { throw null; } }
+        public string ETag { get { throw null; } }
+        public string Id { get { throw null; } }
+        public Azure.Compute.Batch.BatchPoolIdentity Identity { get { throw null; } }
+        public System.DateTimeOffset? LastModified { get { throw null; } }
+        public System.Collections.Generic.IReadOnlyList<Azure.Compute.Batch.MetadataItem> Metadata { get { throw null; } }
+        public System.Collections.Generic.IReadOnlyList<Azure.Compute.Batch.MountConfiguration> MountConfiguration { get { throw null; } }
+        public Azure.Compute.Batch.NetworkConfiguration NetworkConfiguration { get { throw null; } }
+        public System.Collections.Generic.IReadOnlyList<Azure.Compute.Batch.ResizeError> ResizeErrors { get { throw null; } }
+        public System.TimeSpan? ResizeTimeout { get { throw null; } }
+        public System.Collections.Generic.IReadOnlyDictionary<string, string> ResourceTags { get { throw null; } }
+        public Azure.Compute.Batch.BatchStartTask StartTask { get { throw null; } }
+        public Azure.Compute.Batch.BatchPoolState? State { get { throw null; } }
+        public System.DateTimeOffset? StateTransitionTime { get { throw null; } }
+        public Azure.Compute.Batch.BatchPoolStatistics Stats { get { throw null; } }
+        public int? TargetDedicatedNodes { get { throw null; } }
+        public int? TargetLowPriorityNodes { get { throw null; } }
+        public Azure.Compute.Batch.BatchNodeCommunicationMode? TargetNodeCommunicationMode { get { throw null; } }
+        public Azure.Compute.Batch.BatchTaskSchedulingPolicy TaskSchedulingPolicy { get { throw null; } }
+        public int? TaskSlotsPerNode { get { throw null; } }
+        public Azure.Compute.Batch.UpgradePolicy UpgradePolicy { get { throw null; } }
+        public string Url { get { throw null; } }
+        public System.Collections.Generic.IReadOnlyList<Azure.Compute.Batch.UserAccount> UserAccounts { get { throw null; } }
+        public Azure.Compute.Batch.VirtualMachineConfiguration VirtualMachineConfiguration { get { throw null; } }
+        public string VmSize { get { throw null; } }
+        Azure.Compute.Batch.BatchPool System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPool>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPool>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.BatchPool System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPool>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPool>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPool>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
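+    // Illustrative sketch of reading the BatchPool model; the BatchClient.GetPool call is assumed:
+    //   BatchPool pool = batchClient.GetPool("<poolId>");
+    //   if (pool.AllocationState == AllocationState.Steady && pool.CurrentDedicatedNodes > 0) { /* ready */ }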
+    public partial class BatchPoolCreateContent : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolCreateContent>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolCreateContent>
+    {
+        public BatchPoolCreateContent(string id, string vmSize) { }
+        public System.Collections.Generic.IList<Azure.Compute.Batch.BatchApplicationPackageReference> ApplicationPackageReferences { get { throw null; } }
+        public System.TimeSpan? AutoScaleEvaluationInterval { get { throw null; } set { } }
+        public string AutoScaleFormula { get { throw null; } set { } }
+        public string DisplayName { get { throw null; } set { } }
+        public bool? EnableAutoScale { get { throw null; } set { } }
+        public bool? EnableInterNodeCommunication { get { throw null; } set { } }
+        public string Id { get { throw null; } }
+        public System.Collections.Generic.IList<Azure.Compute.Batch.MetadataItem> Metadata { get { throw null; } }
+        public System.Collections.Generic.IList<Azure.Compute.Batch.MountConfiguration> MountConfiguration { get { throw null; } }
+        public Azure.Compute.Batch.NetworkConfiguration NetworkConfiguration { get { throw null; } set { } }
+        public System.TimeSpan? ResizeTimeout { get { throw null; } set { } }
+        public System.Collections.Generic.IDictionary<string, string> ResourceTags { get { throw null; } }
+        public Azure.Compute.Batch.BatchStartTask StartTask { get { throw null; } set { } }
+        public int? TargetDedicatedNodes { get { throw null; } set { } }
+        public int? TargetLowPriorityNodes { get { throw null; } set { } }
+        public Azure.Compute.Batch.BatchNodeCommunicationMode? TargetNodeCommunicationMode { get { throw null; } set { } }
+        public Azure.Compute.Batch.BatchTaskSchedulingPolicy TaskSchedulingPolicy { get { throw null; } set { } }
+        public int? TaskSlotsPerNode { get { throw null; } set { } }
+        public Azure.Compute.Batch.UpgradePolicy UpgradePolicy { get { throw null; } set { } }
+        public System.Collections.Generic.IList<Azure.Compute.Batch.UserAccount> UserAccounts { get { throw null; } }
+        public Azure.Compute.Batch.VirtualMachineConfiguration VirtualMachineConfiguration { get { throw null; } set { } }
+        public string VmSize { get { throw null; } }
+        Azure.Compute.Batch.BatchPoolCreateContent System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolCreateContent>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolCreateContent>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.BatchPoolCreateContent System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolCreateContent>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolCreateContent>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolCreateContent>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
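+    // Illustrative sketch of BatchPoolCreateContent; the image/agent values and the
+    // BatchClient.CreatePool call are assumed placeholders:
+    //   var pool = new BatchPoolCreateContent("<poolId>", "STANDARD_D2s_v3")
+    //   {
+    //       TargetDedicatedNodes = 2,
+    //       VirtualMachineConfiguration = new VirtualMachineConfiguration(
+    //           new ImageReference { Publisher = "<publisher>", Offer = "<offer>", Sku = "<sku>", Version = "latest" },
+    //           "<nodeAgentSkuId>"),
+    //   };
+    //   batchClient.CreatePool(pool);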
+    public partial class BatchPoolEnableAutoScaleContent : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolEnableAutoScaleContent>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolEnableAutoScaleContent>
+    {
+        public BatchPoolEnableAutoScaleContent() { }
+        public System.TimeSpan? AutoScaleEvaluationInterval { get { throw null; } set { } }
+        public string AutoScaleFormula { get { throw null; } set { } }
+        Azure.Compute.Batch.BatchPoolEnableAutoScaleContent System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolEnableAutoScaleContent>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolEnableAutoScaleContent>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.BatchPoolEnableAutoScaleContent System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolEnableAutoScaleContent>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolEnableAutoScaleContent>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolEnableAutoScaleContent>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    public partial class BatchPoolEndpointConfiguration : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolEndpointConfiguration>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolEndpointConfiguration>
+    {
+        public BatchPoolEndpointConfiguration(System.Collections.Generic.IEnumerable<Azure.Compute.Batch.InboundNatPool> inboundNatPools) { }
+        public System.Collections.Generic.IList<Azure.Compute.Batch.InboundNatPool> InboundNatPools { get { throw null; } }
+        Azure.Compute.Batch.BatchPoolEndpointConfiguration System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolEndpointConfiguration>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolEndpointConfiguration>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.BatchPoolEndpointConfiguration System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolEndpointConfiguration>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolEndpointConfiguration>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolEndpointConfiguration>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    public partial class BatchPoolEvaluateAutoScaleContent : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolEvaluateAutoScaleContent>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolEvaluateAutoScaleContent>
+    {
+        public BatchPoolEvaluateAutoScaleContent(string autoScaleFormula) { }
+        public string AutoScaleFormula { get { throw null; } }
+        Azure.Compute.Batch.BatchPoolEvaluateAutoScaleContent System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolEvaluateAutoScaleContent>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolEvaluateAutoScaleContent>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.BatchPoolEvaluateAutoScaleContent System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolEvaluateAutoScaleContent>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolEvaluateAutoScaleContent>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolEvaluateAutoScaleContent>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
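+    // Illustrative sketch of the autoscale content types above; the BatchClient method names are assumed:
+    //   batchClient.EnablePoolAutoScale("<poolId>", new BatchPoolEnableAutoScaleContent
+    //   {
+    //       AutoScaleFormula = "$TargetDedicatedNodes = 2;",
+    //       AutoScaleEvaluationInterval = TimeSpan.FromMinutes(5),
+    //   });
+    //   var run = batchClient.EvaluatePoolAutoScale("<poolId>",
+    //       new BatchPoolEvaluateAutoScaleContent("$TargetDedicatedNodes = 3;"));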
+    public partial class BatchPoolIdentity : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolIdentity>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolIdentity>
+    {
+        internal BatchPoolIdentity() { }
+        public Azure.Compute.Batch.BatchPoolIdentityType Type { get { throw null; } }
+        public System.Collections.Generic.IReadOnlyList<Azure.Compute.Batch.UserAssignedIdentity> UserAssignedIdentities { get { throw null; } }
+        Azure.Compute.Batch.BatchPoolIdentity System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolIdentity>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolIdentity>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.BatchPoolIdentity System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolIdentity>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolIdentity>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolIdentity>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)]
+    public readonly partial struct BatchPoolIdentityType : System.IEquatable<Azure.Compute.Batch.BatchPoolIdentityType>
+    {
+        private readonly object _dummy;
+        private readonly int _dummyPrimitive;
+        public BatchPoolIdentityType(string value) { throw null; }
+        public static Azure.Compute.Batch.BatchPoolIdentityType None { get { throw null; } }
+        public static Azure.Compute.Batch.BatchPoolIdentityType UserAssigned { get { throw null; } }
+        public bool Equals(Azure.Compute.Batch.BatchPoolIdentityType other) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override bool Equals(object obj) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override int GetHashCode() { throw null; }
+        public static bool operator ==(Azure.Compute.Batch.BatchPoolIdentityType left, Azure.Compute.Batch.BatchPoolIdentityType right) { throw null; }
+        public static implicit operator Azure.Compute.Batch.BatchPoolIdentityType (string value) { throw null; }
+        public static bool operator !=(Azure.Compute.Batch.BatchPoolIdentityType left, Azure.Compute.Batch.BatchPoolIdentityType right) { throw null; }
+        public override string ToString() { throw null; }
+    }
+    public partial class BatchPoolInfo : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolInfo>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolInfo>
+    {
+        public BatchPoolInfo() { }
+        public Azure.Compute.Batch.BatchAutoPoolSpecification AutoPoolSpecification { get { throw null; } set { } }
+        public string PoolId { get { throw null; } set { } }
+        Azure.Compute.Batch.BatchPoolInfo System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolInfo>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolInfo>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.BatchPoolInfo System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolInfo>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolInfo>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolInfo>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)]
+    public readonly partial struct BatchPoolLifetimeOption : System.IEquatable<Azure.Compute.Batch.BatchPoolLifetimeOption>
+    {
+        private readonly object _dummy;
+        private readonly int _dummyPrimitive;
+        public BatchPoolLifetimeOption(string value) { throw null; }
+        public static Azure.Compute.Batch.BatchPoolLifetimeOption Job { get { throw null; } }
+        public static Azure.Compute.Batch.BatchPoolLifetimeOption JobSchedule { get { throw null; } }
+        public bool Equals(Azure.Compute.Batch.BatchPoolLifetimeOption other) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override bool Equals(object obj) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override int GetHashCode() { throw null; }
+        public static bool operator ==(Azure.Compute.Batch.BatchPoolLifetimeOption left, Azure.Compute.Batch.BatchPoolLifetimeOption right) { throw null; }
+        public static implicit operator Azure.Compute.Batch.BatchPoolLifetimeOption (string value) { throw null; }
+        public static bool operator !=(Azure.Compute.Batch.BatchPoolLifetimeOption left, Azure.Compute.Batch.BatchPoolLifetimeOption right) { throw null; }
+        public override string ToString() { throw null; }
+    }
+    public partial class BatchPoolNodeCounts : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolNodeCounts>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolNodeCounts>
+    {
+        internal BatchPoolNodeCounts() { }
+        public Azure.Compute.Batch.BatchNodeCounts Dedicated { get { throw null; } }
+        public Azure.Compute.Batch.BatchNodeCounts LowPriority { get { throw null; } }
+        public string PoolId { get { throw null; } }
+        Azure.Compute.Batch.BatchPoolNodeCounts System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolNodeCounts>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolNodeCounts>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.BatchPoolNodeCounts System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolNodeCounts>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolNodeCounts>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolNodeCounts>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
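+    // Illustrative sketch of BatchPoolNodeCounts; the BatchClient.GetPoolNodeCounts pageable and the
+    // BatchNodeCounts.Idle property are assumed:
+    //   foreach (BatchPoolNodeCounts counts in batchClient.GetPoolNodeCounts())
+    //   {
+    //       Console.WriteLine($"{counts.PoolId}: {counts.Dedicated.Idle} idle dedicated node(s)");
+    //   }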
+    public partial class BatchPoolReplaceContent : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolReplaceContent>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolReplaceContent>
+    {
+        public BatchPoolReplaceContent(System.Collections.Generic.IEnumerable<Azure.Compute.Batch.BatchApplicationPackageReference> applicationPackageReferences, System.Collections.Generic.IEnumerable<Azure.Compute.Batch.MetadataItem> metadata) { }
+        public System.Collections.Generic.IList<Azure.Compute.Batch.BatchApplicationPackageReference> ApplicationPackageReferences { get { throw null; } }
+        public System.Collections.Generic.IList<Azure.Compute.Batch.MetadataItem> Metadata { get { throw null; } }
+        public Azure.Compute.Batch.BatchStartTask StartTask { get { throw null; } set { } }
+        public Azure.Compute.Batch.BatchNodeCommunicationMode? TargetNodeCommunicationMode { get { throw null; } set { } }
+        void global::System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolReplaceContent>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.BatchPoolReplaceContent System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolReplaceContent>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        Azure.Compute.Batch.BatchPoolReplaceContent System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolReplaceContent>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolReplaceContent>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolReplaceContent>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    public partial class BatchPoolResizeContent : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolResizeContent>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolResizeContent>
+    {
+        public BatchPoolResizeContent() { }
+        public Azure.Compute.Batch.BatchNodeDeallocationOption? NodeDeallocationOption { get { throw null; } set { } }
+        public System.TimeSpan? ResizeTimeout { get { throw null; } set { } }
+        public int? TargetDedicatedNodes { get { throw null; } set { } }
+        public int? TargetLowPriorityNodes { get { throw null; } set { } }
+        Azure.Compute.Batch.BatchPoolResizeContent System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolResizeContent>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolResizeContent>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.BatchPoolResizeContent System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolResizeContent>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolResizeContent>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolResizeContent>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
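+    // Illustrative sketch of BatchPoolResizeContent; the BatchClient.ResizePool call is assumed:
+    //   var resize = new BatchPoolResizeContent
+    //   {
+    //       TargetDedicatedNodes = 5,
+    //       ResizeTimeout = TimeSpan.FromMinutes(15),
+    //   };
+    //   batchClient.ResizePool("<poolId>", resize);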
+    public partial class BatchPoolResourceStatistics : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolResourceStatistics>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolResourceStatistics>
+    {
+        internal BatchPoolResourceStatistics() { }
+        public float AvgCpuPercentage { get { throw null; } }
+        public float AvgDiskGiB { get { throw null; } }
+        public float AvgMemoryGiB { get { throw null; } }
+        public float DiskReadGiB { get { throw null; } }
+        public long DiskReadIOps { get { throw null; } }
+        public float DiskWriteGiB { get { throw null; } }
+        public long DiskWriteIOps { get { throw null; } }
+        public System.DateTimeOffset LastUpdateTime { get { throw null; } }
+        public float NetworkReadGiB { get { throw null; } }
+        public float NetworkWriteGiB { get { throw null; } }
+        public float PeakDiskGiB { get { throw null; } }
+        public float PeakMemoryGiB { get { throw null; } }
+        public System.DateTimeOffset StartTime { get { throw null; } }
+        Azure.Compute.Batch.BatchPoolResourceStatistics System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolResourceStatistics>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolResourceStatistics>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.BatchPoolResourceStatistics System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolResourceStatistics>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolResourceStatistics>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolResourceStatistics>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    public partial class BatchPoolSpecification : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolSpecification>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolSpecification>
+    {
+        public BatchPoolSpecification(string vmSize) { }
+        public System.Collections.Generic.IList<Azure.Compute.Batch.BatchApplicationPackageReference> ApplicationPackageReferences { get { throw null; } }
+        public System.TimeSpan? AutoScaleEvaluationInterval { get { throw null; } set { } }
+        public string AutoScaleFormula { get { throw null; } set { } }
+        public string DisplayName { get { throw null; } set { } }
+        public bool? EnableAutoScale { get { throw null; } set { } }
+        public bool? EnableInterNodeCommunication { get { throw null; } set { } }
+        public System.Collections.Generic.IList<Azure.Compute.Batch.MetadataItem> Metadata { get { throw null; } }
+        public System.Collections.Generic.IList<Azure.Compute.Batch.MountConfiguration> MountConfiguration { get { throw null; } }
+        public Azure.Compute.Batch.NetworkConfiguration NetworkConfiguration { get { throw null; } set { } }
+        public System.TimeSpan? ResizeTimeout { get { throw null; } set { } }
+        public string ResourceTags { get { throw null; } set { } }
+        public Azure.Compute.Batch.BatchStartTask StartTask { get { throw null; } set { } }
+        public int? TargetDedicatedNodes { get { throw null; } set { } }
+        public int? TargetLowPriorityNodes { get { throw null; } set { } }
+        public Azure.Compute.Batch.BatchNodeCommunicationMode? TargetNodeCommunicationMode { get { throw null; } set { } }
+        public Azure.Compute.Batch.BatchTaskSchedulingPolicy TaskSchedulingPolicy { get { throw null; } set { } }
+        public int? TaskSlotsPerNode { get { throw null; } set { } }
+        public Azure.Compute.Batch.UpgradePolicy UpgradePolicy { get { throw null; } set { } }
+        public System.Collections.Generic.IList<Azure.Compute.Batch.UserAccount> UserAccounts { get { throw null; } }
+        public Azure.Compute.Batch.VirtualMachineConfiguration VirtualMachineConfiguration { get { throw null; } set { } }
+        public string VmSize { get { throw null; } set { } }
+        Azure.Compute.Batch.BatchPoolSpecification System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolSpecification>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolSpecification>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.BatchPoolSpecification System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolSpecification>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolSpecification>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolSpecification>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)]
+    public readonly partial struct BatchPoolState : System.IEquatable<Azure.Compute.Batch.BatchPoolState>
+    {
+        private readonly object _dummy;
+        private readonly int _dummyPrimitive;
+        public BatchPoolState(string value) { throw null; }
+        public static Azure.Compute.Batch.BatchPoolState Active { get { throw null; } }
+        public static Azure.Compute.Batch.BatchPoolState Deleting { get { throw null; } }
+        public bool Equals(Azure.Compute.Batch.BatchPoolState other) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override bool Equals(object obj) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override int GetHashCode() { throw null; }
+        public static bool operator ==(Azure.Compute.Batch.BatchPoolState left, Azure.Compute.Batch.BatchPoolState right) { throw null; }
+        public static implicit operator Azure.Compute.Batch.BatchPoolState (string value) { throw null; }
+        public static bool operator !=(Azure.Compute.Batch.BatchPoolState left, Azure.Compute.Batch.BatchPoolState right) { throw null; }
+        public override string ToString() { throw null; }
+    }
+    public partial class BatchPoolStatistics : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolStatistics>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolStatistics>
+    {
+        internal BatchPoolStatistics() { }
+        public System.DateTimeOffset LastUpdateTime { get { throw null; } }
+        public Azure.Compute.Batch.BatchPoolResourceStatistics ResourceStats { get { throw null; } }
+        public System.DateTimeOffset StartTime { get { throw null; } }
+        public string Url { get { throw null; } }
+        public Azure.Compute.Batch.BatchPoolUsageStatistics UsageStats { get { throw null; } }
+        Azure.Compute.Batch.BatchPoolStatistics System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolStatistics>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolStatistics>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.BatchPoolStatistics System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolStatistics>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolStatistics>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolStatistics>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    public partial class BatchPoolUpdateContent : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolUpdateContent>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolUpdateContent>
+    {
+        public BatchPoolUpdateContent() { }
+        public System.Collections.Generic.IList<Azure.Compute.Batch.BatchApplicationPackageReference> ApplicationPackageReferences { get { throw null; } }
+        public System.Collections.Generic.IList<Azure.Compute.Batch.MetadataItem> Metadata { get { throw null; } }
+        public Azure.Compute.Batch.BatchStartTask StartTask { get { throw null; } set { } }
+        public Azure.Compute.Batch.BatchNodeCommunicationMode? TargetNodeCommunicationMode { get { throw null; } set { } }
+        Azure.Compute.Batch.BatchPoolUpdateContent System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolUpdateContent>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolUpdateContent>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.BatchPoolUpdateContent System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolUpdateContent>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolUpdateContent>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolUpdateContent>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    public partial class BatchPoolUsageMetrics : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolUsageMetrics>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolUsageMetrics>
+    {
+        internal BatchPoolUsageMetrics() { }
+        public System.DateTimeOffset EndTime { get { throw null; } }
+        public string PoolId { get { throw null; } }
+        public System.DateTimeOffset StartTime { get { throw null; } }
+        public float TotalCoreHours { get { throw null; } }
+        public string VmSize { get { throw null; } }
+        Azure.Compute.Batch.BatchPoolUsageMetrics System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolUsageMetrics>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolUsageMetrics>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.BatchPoolUsageMetrics System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolUsageMetrics>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolUsageMetrics>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolUsageMetrics>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
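+    // Illustrative sketch of BatchPoolUpdateContent; the MetadataItem constructor and the
+    // BatchClient.UpdatePool call are assumed:
+    //   var update = new BatchPoolUpdateContent();
+    //   update.Metadata.Add(new MetadataItem("owner", "contoso"));
+    //   batchClient.UpdatePool("<poolId>", update);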
+    public partial class BatchPoolUsageStatistics : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolUsageStatistics>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolUsageStatistics>
+    {
+        internal BatchPoolUsageStatistics() { }
+        public System.TimeSpan DedicatedCoreTime { get { throw null; } }
+        public System.DateTimeOffset LastUpdateTime { get { throw null; } }
+        public System.DateTimeOffset StartTime { get { throw null; } }
+        Azure.Compute.Batch.BatchPoolUsageStatistics System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolUsageStatistics>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchPoolUsageStatistics>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.BatchPoolUsageStatistics System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolUsageStatistics>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolUsageStatistics>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchPoolUsageStatistics>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    public partial class BatchStartTask : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchStartTask>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchStartTask>
+    {
+        public BatchStartTask(string commandLine) { }
+        public string CommandLine { get { throw null; } set { } }
+        public Azure.Compute.Batch.BatchTaskContainerSettings ContainerSettings { get { throw null; } set { } }
+        public System.Collections.Generic.IList<Azure.Compute.Batch.EnvironmentSetting> EnvironmentSettings { get { throw null; } }
+        public int? MaxTaskRetryCount { get { throw null; } set { } }
+        public System.Collections.Generic.IList<Azure.Compute.Batch.ResourceFile> ResourceFiles { get { throw null; } }
+        public Azure.Compute.Batch.UserIdentity UserIdentity { get { throw null; } set { } }
+        public bool? WaitForSuccess { get { throw null; } set { } }
+        Azure.Compute.Batch.BatchStartTask System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchStartTask>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchStartTask>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.BatchStartTask System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchStartTask>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchStartTask>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchStartTask>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
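+    // Illustrative sketch of BatchStartTask as attached to a pool definition; command line and the
+    // poolCreateContent variable (a BatchPoolCreateContent) are placeholders:
+    //   var startTask = new BatchStartTask("/bin/bash -c 'echo hello from start task'")
+    //   {
+    //       WaitForSuccess = true,
+    //       MaxTaskRetryCount = 3,
+    //   };
+    //   poolCreateContent.StartTask = startTask;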
+    public partial class BatchStartTaskInfo : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchStartTaskInfo>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchStartTaskInfo>
+    {
+        internal BatchStartTaskInfo() { }
+        public Azure.Compute.Batch.BatchTaskContainerExecutionInfo ContainerInfo { get { throw null; } }
+        public System.DateTimeOffset? EndTime { get { throw null; } }
+        public int? ExitCode { get { throw null; } }
+        public Azure.Compute.Batch.BatchTaskFailureInfo FailureInfo { get { throw null; } }
+        public System.DateTimeOffset? LastRetryTime { get { throw null; } }
+        public Azure.Compute.Batch.BatchTaskExecutionResult? Result { get { throw null; } }
+        public int RetryCount { get { throw null; } }
+        public System.DateTimeOffset StartTime { get { throw null; } }
+        public Azure.Compute.Batch.BatchStartTaskState State { get { throw null; } }
+        Azure.Compute.Batch.BatchStartTaskInfo System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchStartTaskInfo>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchStartTaskInfo>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.BatchStartTaskInfo System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchStartTaskInfo>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchStartTaskInfo>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchStartTaskInfo>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)]
+    public readonly partial struct BatchStartTaskState : System.IEquatable<Azure.Compute.Batch.BatchStartTaskState>
+    {
+        private readonly object _dummy;
+        private readonly int _dummyPrimitive;
+        public BatchStartTaskState(string value) { throw null; }
+        public static Azure.Compute.Batch.BatchStartTaskState Completed { get { throw null; } }
+        public static Azure.Compute.Batch.BatchStartTaskState Running { get { throw null; } }
+        public bool Equals(Azure.Compute.Batch.BatchStartTaskState other) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override bool Equals(object obj) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override int GetHashCode() { throw null; }
+        public static bool operator ==(Azure.Compute.Batch.BatchStartTaskState left, Azure.Compute.Batch.BatchStartTaskState right) { throw null; }
+        public static implicit operator Azure.Compute.Batch.BatchStartTaskState (string value) { throw null; }
+        public static bool operator !=(Azure.Compute.Batch.BatchStartTaskState left, Azure.Compute.Batch.BatchStartTaskState right) { throw null; }
+        public override string ToString() { throw null; }
+    }
+    public partial class BatchSubtask : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchSubtask>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchSubtask>
+    {
+        internal BatchSubtask() { }
+        public Azure.Compute.Batch.BatchTaskContainerExecutionInfo ContainerInfo { get { throw null; } }
+        public System.DateTimeOffset? EndTime { get { throw null; } }
+        public int? ExitCode { get { throw null; } }
+        public Azure.Compute.Batch.BatchTaskFailureInfo FailureInfo { get { throw null; } }
+        public int? Id { get { throw null; } }
+        public Azure.Compute.Batch.BatchNodeInfo NodeInfo { get { throw null; } }
+        public Azure.Compute.Batch.BatchSubtaskState? PreviousState { get { throw null; } }
+        public System.DateTimeOffset? PreviousStateTransitionTime { get { throw null; } }
+        public Azure.Compute.Batch.BatchTaskExecutionResult? Result { get { throw null; } }
+        public System.DateTimeOffset? StartTime { get { throw null; } }
+        public Azure.Compute.Batch.BatchSubtaskState? State { get { throw null; } }
+        public System.DateTimeOffset? StateTransitionTime { get { throw null; } }
+        Azure.Compute.Batch.BatchSubtask System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchSubtask>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchSubtask>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.BatchSubtask System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchSubtask>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchSubtask>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchSubtask>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)]
+    public readonly partial struct BatchSubtaskState : System.IEquatable<Azure.Compute.Batch.BatchSubtaskState>
+    {
+        private readonly object _dummy;
+        private readonly int _dummyPrimitive;
+        public BatchSubtaskState(string value) { throw null; }
+        public static Azure.Compute.Batch.BatchSubtaskState Completed { get { throw null; } }
+        public static Azure.Compute.Batch.BatchSubtaskState Preparing { get { throw null; } }
+        public static Azure.Compute.Batch.BatchSubtaskState Running { get { throw null; } }
+        public bool Equals(Azure.Compute.Batch.BatchSubtaskState other) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override bool Equals(object obj) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override int GetHashCode() { throw null; }
+        public static bool operator ==(Azure.Compute.Batch.BatchSubtaskState left, Azure.Compute.Batch.BatchSubtaskState right) { throw null; }
+        public static implicit operator Azure.Compute.Batch.BatchSubtaskState (string value) { throw null; }
+        public static bool operator !=(Azure.Compute.Batch.BatchSubtaskState left, Azure.Compute.Batch.BatchSubtaskState right) { throw null; }
+        public override string ToString() { throw null; }
+    }
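+    // Illustrative sketch of BatchSubtask for multi-instance tasks; the BatchClient.GetSubTasks
+    // pageable is assumed:
+    //   foreach (BatchSubtask subtask in batchClient.GetSubTasks("<jobId>", "<taskId>"))
+    //   {
+    //       Console.WriteLine($"subtask {subtask.Id}: {subtask.State}");
+    //   }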
+    public partial class BatchSupportedImage : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchSupportedImage>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchSupportedImage>
+    {
+        internal BatchSupportedImage() { }
+        public System.DateTimeOffset? BatchSupportEndOfLife { get { throw null; } }
+        public System.Collections.Generic.IReadOnlyList<string> Capabilities { get { throw null; } }
+        public Azure.Compute.Batch.ImageReference ImageReference { get { throw null; } }
+        public string NodeAgentSkuId { get { throw null; } }
+        public Azure.Compute.Batch.OSType OsType { get { throw null; } }
+        public Azure.Compute.Batch.ImageVerificationType VerificationType { get { throw null; } }
+        Azure.Compute.Batch.BatchSupportedImage System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchSupportedImage>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchSupportedImage>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.BatchSupportedImage System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchSupportedImage>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchSupportedImage>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchSupportedImage>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    public partial class BatchTask : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.BatchTask>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.BatchTask>
+    {
+        public BatchTask() { }
+        public Azure.Compute.Batch.AffinityInfo AffinityInfo { get { throw null; } }
+        public System.Collections.Generic.IReadOnlyList<Azure.Compute.Batch.BatchApplicationPackageReference> ApplicationPackageReferences { get { throw null; } }
+        public Azure.Compute.Batch.AuthenticationTokenSettings AuthenticationTokenSettings { get { throw null; } }
+        public string CommandLine { get { throw null; } }
+        public Azure.Compute.Batch.BatchTaskConstraints Constraints { get { throw null; } set { } }
+        public Azure.Compute.Batch.BatchTaskContainerSettings ContainerSettings { get { throw null; } }
+        public System.DateTimeOffset? CreationTime { get { throw null; } }
+        public Azure.Compute.Batch.BatchTaskDependencies DependsOn { get { throw null; } }
+        public string DisplayName { get { throw null; } }
+        public System.Collections.Generic.IReadOnlyList<Azure.Compute.Batch.EnvironmentSetting> EnvironmentSettings { get { throw null; } }
+        public string ETag { get { throw null; } }
+        public Azure.Compute.Batch.BatchTaskExecutionInfo ExecutionInfo { get { throw null; } }
+        public Azure.Compute.Batch.ExitConditions ExitConditions { get { throw null; } }
+        public string Id { get { throw null; } }
+        public System.DateTimeOffset? LastModified { get { throw null; } }
+        public Azure.Compute.Batch.MultiInstanceSettings MultiInstanceSettings { get { throw null; } }
+        public Azure.Compute.Batch.BatchNodeInfo NodeInfo { get { throw null; } }
+        public System.Collections.Generic.IReadOnlyList<Azure.Compute.Batch.OutputFile> OutputFiles { get { throw null; } }
+        public Azure.Compute.Batch.BatchTaskState? PreviousState { get { throw null; } }
+        public System.DateTimeOffset? PreviousStateTransitionTime { get { throw null; } }
+        public int? RequiredSlots { get { throw null; } }
+        public System.Collections.Generic.IReadOnlyList<Azure.Compute.Batch.ResourceFile> ResourceFiles { get { throw null; } }
+        public Azure.Compute.Batch.BatchTaskState? State { get { throw null; } }
StateTransitionTime { get { throw null; } } + public Azure.Compute.Batch.BatchTaskStatistics Stats { get { throw null; } } + public string Url { get { throw null; } } + public Azure.Compute.Batch.UserIdentity UserIdentity { get { throw null; } } + Azure.Compute.Batch.BatchTask System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchTask System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class BatchTaskAddCollectionResult : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + internal BatchTaskAddCollectionResult() { } + public System.Collections.Generic.IReadOnlyList Value { get { throw null; } } + Azure.Compute.Batch.BatchTaskAddCollectionResult System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchTaskAddCollectionResult System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class BatchTaskAddResult : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + internal BatchTaskAddResult() { } + public Azure.Compute.Batch.BatchError Error { get { throw null; } } + public string ETag { get { throw null; } } + public System.DateTimeOffset? 
LastModified { get { throw null; } } + public string Location { get { throw null; } } + public Azure.Compute.Batch.BatchTaskAddStatus Status { get { throw null; } } + public string TaskId { get { throw null; } } + Azure.Compute.Batch.BatchTaskAddResult System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchTaskAddResult System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] + public readonly partial struct BatchTaskAddStatus : System.IEquatable + { + private readonly object _dummy; + private readonly int _dummyPrimitive; + public BatchTaskAddStatus(string value) { throw null; } + public static Azure.Compute.Batch.BatchTaskAddStatus ClientError { get { throw null; } } + public static Azure.Compute.Batch.BatchTaskAddStatus ServerError { get { throw null; } } + public static Azure.Compute.Batch.BatchTaskAddStatus Success { get { throw null; } } + public bool Equals(Azure.Compute.Batch.BatchTaskAddStatus other) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override bool Equals(object obj) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override int GetHashCode() { throw null; } + public static bool operator ==(Azure.Compute.Batch.BatchTaskAddStatus left, Azure.Compute.Batch.BatchTaskAddStatus right) { throw null; } + public static implicit operator Azure.Compute.Batch.BatchTaskAddStatus (string value) { throw null; } + public static bool operator !=(Azure.Compute.Batch.BatchTaskAddStatus left, Azure.Compute.Batch.BatchTaskAddStatus right) { throw null; } + public override string ToString() { throw null; } + } + public partial class BatchTaskConstraints : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public BatchTaskConstraints() { } + public int? MaxTaskRetryCount { get { throw null; } set { } } + public System.TimeSpan? MaxWallClockTime { get { throw null; } set { } } + public System.TimeSpan? 
RetentionTime { get { throw null; } set { } } + Azure.Compute.Batch.BatchTaskConstraints System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchTaskConstraints System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class BatchTaskContainerExecutionInfo : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public BatchTaskContainerExecutionInfo() { } + public string ContainerId { get { throw null; } set { } } + public string Error { get { throw null; } set { } } + public string State { get { throw null; } set { } } + Azure.Compute.Batch.BatchTaskContainerExecutionInfo System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchTaskContainerExecutionInfo System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class BatchTaskContainerSettings : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public BatchTaskContainerSettings(string imageName) { } + public string ContainerRunOptions { get { throw null; } set { } } + public string ImageName { get { throw null; } set { } } + public Azure.Compute.Batch.ContainerRegistryReference Registry { get { throw null; } set { } } + public Azure.Compute.Batch.ContainerWorkingDirectory? 
WorkingDirectory { get { throw null; } set { } } + Azure.Compute.Batch.BatchTaskContainerSettings System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchTaskContainerSettings System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class BatchTaskCounts : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + internal BatchTaskCounts() { } + public int Active { get { throw null; } } + public int Completed { get { throw null; } } + public int Failed { get { throw null; } } + public int Running { get { throw null; } } + public int Succeeded { get { throw null; } } + Azure.Compute.Batch.BatchTaskCounts System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchTaskCounts System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class BatchTaskCountsResult : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + internal BatchTaskCountsResult() { } + public Azure.Compute.Batch.BatchTaskCounts TaskCounts { get { throw null; } } + public Azure.Compute.Batch.BatchTaskSlotCounts TaskSlotCounts { get { throw null; } } + Azure.Compute.Batch.BatchTaskCountsResult System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchTaskCountsResult System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class BatchTaskCreateContent : 
System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public BatchTaskCreateContent(string id, string commandLine) { } + public Azure.Compute.Batch.AffinityInfo AffinityInfo { get { throw null; } set { } } + public System.Collections.Generic.IList ApplicationPackageReferences { get { throw null; } } + public Azure.Compute.Batch.AuthenticationTokenSettings AuthenticationTokenSettings { get { throw null; } set { } } + public string CommandLine { get { throw null; } } + public Azure.Compute.Batch.BatchTaskConstraints Constraints { get { throw null; } set { } } + public Azure.Compute.Batch.BatchTaskContainerSettings ContainerSettings { get { throw null; } set { } } + public Azure.Compute.Batch.BatchTaskDependencies DependsOn { get { throw null; } set { } } + public string DisplayName { get { throw null; } set { } } + public System.Collections.Generic.IList EnvironmentSettings { get { throw null; } } + public Azure.Compute.Batch.ExitConditions ExitConditions { get { throw null; } set { } } + public string Id { get { throw null; } } + public Azure.Compute.Batch.MultiInstanceSettings MultiInstanceSettings { get { throw null; } set { } } + public System.Collections.Generic.IList OutputFiles { get { throw null; } } + public int? RequiredSlots { get { throw null; } set { } } + public System.Collections.Generic.IList ResourceFiles { get { throw null; } } + public Azure.Compute.Batch.UserIdentity UserIdentity { get { throw null; } set { } } + Azure.Compute.Batch.BatchTaskCreateContent System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchTaskCreateContent System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class BatchTaskDependencies : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public BatchTaskDependencies() { } + public System.Collections.Generic.IList TaskIdRanges { get { throw null; } } + public System.Collections.Generic.IList TaskIds { get { throw null; } } + Azure.Compute.Batch.BatchTaskDependencies System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchTaskDependencies System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData 
System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class BatchTaskExecutionInfo : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public BatchTaskExecutionInfo(int retryCount, int requeueCount) { } + public Azure.Compute.Batch.BatchTaskContainerExecutionInfo ContainerInfo { get { throw null; } set { } } + public System.DateTimeOffset? EndTime { get { throw null; } set { } } + public int? ExitCode { get { throw null; } set { } } + public Azure.Compute.Batch.BatchTaskFailureInfo FailureInfo { get { throw null; } set { } } + public System.DateTimeOffset? LastRequeueTime { get { throw null; } set { } } + public System.DateTimeOffset? LastRetryTime { get { throw null; } set { } } + public int RequeueCount { get { throw null; } set { } } + public Azure.Compute.Batch.BatchTaskExecutionResult? Result { get { throw null; } set { } } + public int RetryCount { get { throw null; } set { } } + public System.DateTimeOffset? StartTime { get { throw null; } set { } } + Azure.Compute.Batch.BatchTaskExecutionInfo System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchTaskExecutionInfo System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] + public readonly partial struct BatchTaskExecutionResult : System.IEquatable + { + private readonly object _dummy; + private readonly int _dummyPrimitive; + public BatchTaskExecutionResult(string value) { throw null; } + public static Azure.Compute.Batch.BatchTaskExecutionResult Failure { get { throw null; } } + public static Azure.Compute.Batch.BatchTaskExecutionResult Success { get { throw null; } } + public bool Equals(Azure.Compute.Batch.BatchTaskExecutionResult other) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override bool Equals(object obj) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override int GetHashCode() { throw null; } + public static bool operator ==(Azure.Compute.Batch.BatchTaskExecutionResult left, Azure.Compute.Batch.BatchTaskExecutionResult right) { throw null; } + public static implicit operator Azure.Compute.Batch.BatchTaskExecutionResult (string value) { throw null; } + public static bool operator !=(Azure.Compute.Batch.BatchTaskExecutionResult left, Azure.Compute.Batch.BatchTaskExecutionResult right) { throw null; } + public override string ToString() { throw null; } + } + public partial class BatchTaskFailureInfo : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public 
BatchTaskFailureInfo(Azure.Compute.Batch.ErrorCategory category) { } + public Azure.Compute.Batch.ErrorCategory Category { get { throw null; } set { } } + public string Code { get { throw null; } set { } } + public System.Collections.Generic.IList Details { get { throw null; } } + public string Message { get { throw null; } set { } } + Azure.Compute.Batch.BatchTaskFailureInfo System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchTaskFailureInfo System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class BatchTaskGroup : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public BatchTaskGroup(System.Collections.Generic.IEnumerable value) { } + public System.Collections.Generic.IList Value { get { throw null; } } + Azure.Compute.Batch.BatchTaskGroup System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchTaskGroup System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class BatchTaskIdRange : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public BatchTaskIdRange(int start, int end) { } + public int End { get { throw null; } set { } } + public int Start { get { throw null; } set { } } + Azure.Compute.Batch.BatchTaskIdRange System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchTaskIdRange System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw 
null; } + } + public partial class BatchTaskInfo : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + internal BatchTaskInfo() { } + public Azure.Compute.Batch.BatchTaskExecutionInfo ExecutionInfo { get { throw null; } } + public string JobId { get { throw null; } } + public int? SubtaskId { get { throw null; } } + public string TaskId { get { throw null; } } + public Azure.Compute.Batch.BatchTaskState TaskState { get { throw null; } } + public string TaskUrl { get { throw null; } } + Azure.Compute.Batch.BatchTaskInfo System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchTaskInfo System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class BatchTaskSchedulingPolicy : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public BatchTaskSchedulingPolicy(Azure.Compute.Batch.BatchNodeFillType nodeFillType) { } + public Azure.Compute.Batch.BatchNodeFillType NodeFillType { get { throw null; } set { } } + Azure.Compute.Batch.BatchTaskSchedulingPolicy System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchTaskSchedulingPolicy System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class BatchTaskSlotCounts : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + internal BatchTaskSlotCounts() { } + public int Active { get { throw null; } } + public int Completed { get { throw null; } } + public int Failed { get { throw null; } } + public int Running { get { throw null; } } + public int Succeeded { get { throw null; } } + Azure.Compute.Batch.BatchTaskSlotCounts System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchTaskSlotCounts System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, 
System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] + public readonly partial struct BatchTaskState : System.IEquatable + { + private readonly object _dummy; + private readonly int _dummyPrimitive; + public BatchTaskState(string value) { throw null; } + public static Azure.Compute.Batch.BatchTaskState Active { get { throw null; } } + public static Azure.Compute.Batch.BatchTaskState Completed { get { throw null; } } + public static Azure.Compute.Batch.BatchTaskState Preparing { get { throw null; } } + public static Azure.Compute.Batch.BatchTaskState Running { get { throw null; } } + public bool Equals(Azure.Compute.Batch.BatchTaskState other) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override bool Equals(object obj) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override int GetHashCode() { throw null; } + public static bool operator ==(Azure.Compute.Batch.BatchTaskState left, Azure.Compute.Batch.BatchTaskState right) { throw null; } + public static implicit operator Azure.Compute.Batch.BatchTaskState (string value) { throw null; } + public static bool operator !=(Azure.Compute.Batch.BatchTaskState left, Azure.Compute.Batch.BatchTaskState right) { throw null; } + public override string ToString() { throw null; } + } + public partial class BatchTaskStatistics : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public BatchTaskStatistics(string url, System.DateTimeOffset startTime, System.DateTimeOffset lastUpdateTime, System.TimeSpan userCpuTime, System.TimeSpan kernelCpuTime, System.TimeSpan wallClockTime, long readIOps, long writeIOps, float readIOGiB, float writeIOGiB, System.TimeSpan waitTime) { } + public System.TimeSpan KernelCpuTime { get { throw null; } set { } } + public System.DateTimeOffset LastUpdateTime { get { throw null; } set { } } + public float ReadIOGiB { get { throw null; } set { } } + public long ReadIOps { get { throw null; } set { } } + public System.DateTimeOffset StartTime { get { throw null; } set { } } + public string Url { get { throw null; } set { } } + public System.TimeSpan UserCpuTime { get { throw null; } set { } } + public System.TimeSpan WaitTime { get { throw null; } set { } } + public System.TimeSpan WallClockTime { get { throw null; } set { } } + public float WriteIOGiB { get { throw null; } set { } } + public long WriteIOps { get { throw null; } set { } } + Azure.Compute.Batch.BatchTaskStatistics System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.BatchTaskStatistics System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions 
options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] + public readonly partial struct CachingType : System.IEquatable + { + private readonly object _dummy; + private readonly int _dummyPrimitive; + public CachingType(string value) { throw null; } + public static Azure.Compute.Batch.CachingType None { get { throw null; } } + public static Azure.Compute.Batch.CachingType ReadOnly { get { throw null; } } + public static Azure.Compute.Batch.CachingType ReadWrite { get { throw null; } } + public bool Equals(Azure.Compute.Batch.CachingType other) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override bool Equals(object obj) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override int GetHashCode() { throw null; } + public static bool operator ==(Azure.Compute.Batch.CachingType left, Azure.Compute.Batch.CachingType right) { throw null; } + public static implicit operator Azure.Compute.Batch.CachingType (string value) { throw null; } + public static bool operator !=(Azure.Compute.Batch.CachingType left, Azure.Compute.Batch.CachingType right) { throw null; } + public override string ToString() { throw null; } + } + public partial class CifsMountConfiguration : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public CifsMountConfiguration(string username, string source, string relativeMountPath, string password) { } + public string MountOptions { get { throw null; } set { } } + public string Password { get { throw null; } set { } } + public string RelativeMountPath { get { throw null; } set { } } + public string Source { get { throw null; } set { } } + public string Username { get { throw null; } set { } } + Azure.Compute.Batch.CifsMountConfiguration System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.CifsMountConfiguration System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public static partial class ComputeBatchModelFactory + { + public static Azure.Compute.Batch.AutoScaleRun AutoScaleRun(System.DateTimeOffset timestamp = default(System.DateTimeOffset), string results = null, Azure.Compute.Batch.AutoScaleRunError error = null) { throw null; } + public static Azure.Compute.Batch.AutoScaleRunError AutoScaleRunError(string code = null, string message = null, 
System.Collections.Generic.IEnumerable values = null) { throw null; } + public static Azure.Compute.Batch.BatchApplication BatchApplication(string id = null, string displayName = null, System.Collections.Generic.IEnumerable versions = null) { throw null; } + public static Azure.Compute.Batch.BatchError BatchError(string code = null, Azure.Compute.Batch.BatchErrorMessage message = null, System.Collections.Generic.IEnumerable values = null) { throw null; } + public static Azure.Compute.Batch.BatchErrorDetail BatchErrorDetail(string key = null, string value = null) { throw null; } + public static Azure.Compute.Batch.BatchErrorMessage BatchErrorMessage(string lang = null, string value = null) { throw null; } + public static Azure.Compute.Batch.BatchJob BatchJob(string id = null, string displayName = null, bool? usesTaskDependencies = default(bool?), string url = null, string eTag = null, System.DateTimeOffset? lastModified = default(System.DateTimeOffset?), System.DateTimeOffset? creationTime = default(System.DateTimeOffset?), Azure.Compute.Batch.BatchJobState? state = default(Azure.Compute.Batch.BatchJobState?), System.DateTimeOffset? stateTransitionTime = default(System.DateTimeOffset?), Azure.Compute.Batch.BatchJobState? previousState = default(Azure.Compute.Batch.BatchJobState?), System.DateTimeOffset? previousStateTransitionTime = default(System.DateTimeOffset?), int? priority = default(int?), bool? allowTaskPreemption = default(bool?), int? maxParallelTasks = default(int?), Azure.Compute.Batch.BatchJobConstraints constraints = null, Azure.Compute.Batch.BatchJobManagerTask jobManagerTask = null, Azure.Compute.Batch.BatchJobPreparationTask jobPreparationTask = null, Azure.Compute.Batch.BatchJobReleaseTask jobReleaseTask = null, System.Collections.Generic.IEnumerable commonEnvironmentSettings = null, Azure.Compute.Batch.BatchPoolInfo poolInfo = null, Azure.Compute.Batch.OnAllBatchTasksComplete? onAllTasksComplete = default(Azure.Compute.Batch.OnAllBatchTasksComplete?), Azure.Compute.Batch.OnBatchTaskFailure? onTaskFailure = default(Azure.Compute.Batch.OnBatchTaskFailure?), Azure.Compute.Batch.BatchJobNetworkConfiguration networkConfiguration = null, System.Collections.Generic.IEnumerable metadata = null, Azure.Compute.Batch.BatchJobExecutionInfo executionInfo = null, Azure.Compute.Batch.BatchJobStatistics stats = null) { throw null; } + public static Azure.Compute.Batch.BatchJobCreateContent BatchJobCreateContent(string id = null, string displayName = null, bool? usesTaskDependencies = default(bool?), int? priority = default(int?), bool? allowTaskPreemption = default(bool?), int? maxParallelTasks = default(int?), Azure.Compute.Batch.BatchJobConstraints constraints = null, Azure.Compute.Batch.BatchJobManagerTask jobManagerTask = null, Azure.Compute.Batch.BatchJobPreparationTask jobPreparationTask = null, Azure.Compute.Batch.BatchJobReleaseTask jobReleaseTask = null, System.Collections.Generic.IEnumerable commonEnvironmentSettings = null, Azure.Compute.Batch.BatchPoolInfo poolInfo = null, Azure.Compute.Batch.OnAllBatchTasksComplete? onAllTasksComplete = default(Azure.Compute.Batch.OnAllBatchTasksComplete?), Azure.Compute.Batch.OnBatchTaskFailure? 
onTaskFailure = default(Azure.Compute.Batch.OnBatchTaskFailure?), Azure.Compute.Batch.BatchJobNetworkConfiguration networkConfiguration = null, System.Collections.Generic.IEnumerable metadata = null) { throw null; } + public static Azure.Compute.Batch.BatchJobPreparationAndReleaseTaskStatus BatchJobPreparationAndReleaseTaskStatus(string poolId = null, string nodeId = null, string nodeUrl = null, Azure.Compute.Batch.BatchJobPreparationTaskExecutionInfo jobPreparationTaskExecutionInfo = null, Azure.Compute.Batch.BatchJobReleaseTaskExecutionInfo jobReleaseTaskExecutionInfo = null) { throw null; } + public static Azure.Compute.Batch.BatchJobPreparationTaskExecutionInfo BatchJobPreparationTaskExecutionInfo(System.DateTimeOffset startTime = default(System.DateTimeOffset), System.DateTimeOffset? endTime = default(System.DateTimeOffset?), Azure.Compute.Batch.BatchJobPreparationTaskState state = default(Azure.Compute.Batch.BatchJobPreparationTaskState), string taskRootDirectory = null, string taskRootDirectoryUrl = null, int? exitCode = default(int?), Azure.Compute.Batch.BatchTaskContainerExecutionInfo containerInfo = null, Azure.Compute.Batch.BatchTaskFailureInfo failureInfo = null, int retryCount = 0, System.DateTimeOffset? lastRetryTime = default(System.DateTimeOffset?), Azure.Compute.Batch.BatchTaskExecutionResult? result = default(Azure.Compute.Batch.BatchTaskExecutionResult?)) { throw null; } + public static Azure.Compute.Batch.BatchJobReleaseTaskExecutionInfo BatchJobReleaseTaskExecutionInfo(System.DateTimeOffset startTime = default(System.DateTimeOffset), System.DateTimeOffset? endTime = default(System.DateTimeOffset?), Azure.Compute.Batch.BatchJobReleaseTaskState state = default(Azure.Compute.Batch.BatchJobReleaseTaskState), string taskRootDirectory = null, string taskRootDirectoryUrl = null, int? exitCode = default(int?), Azure.Compute.Batch.BatchTaskContainerExecutionInfo containerInfo = null, Azure.Compute.Batch.BatchTaskFailureInfo failureInfo = null, Azure.Compute.Batch.BatchTaskExecutionResult? result = default(Azure.Compute.Batch.BatchTaskExecutionResult?)) { throw null; } + public static Azure.Compute.Batch.BatchJobSchedule BatchJobSchedule(string id = null, string displayName = null, string url = null, string eTag = null, System.DateTimeOffset? lastModified = default(System.DateTimeOffset?), System.DateTimeOffset? creationTime = default(System.DateTimeOffset?), Azure.Compute.Batch.BatchJobScheduleState? state = default(Azure.Compute.Batch.BatchJobScheduleState?), System.DateTimeOffset? stateTransitionTime = default(System.DateTimeOffset?), Azure.Compute.Batch.BatchJobScheduleState? previousState = default(Azure.Compute.Batch.BatchJobScheduleState?), System.DateTimeOffset? 
previousStateTransitionTime = default(System.DateTimeOffset?), Azure.Compute.Batch.BatchJobScheduleConfiguration schedule = null, Azure.Compute.Batch.BatchJobSpecification jobSpecification = null, Azure.Compute.Batch.BatchJobScheduleExecutionInfo executionInfo = null, System.Collections.Generic.IEnumerable metadata = null, Azure.Compute.Batch.BatchJobScheduleStatistics stats = null) { throw null; } + public static Azure.Compute.Batch.BatchJobScheduleCreateContent BatchJobScheduleCreateContent(string id = null, string displayName = null, Azure.Compute.Batch.BatchJobScheduleConfiguration schedule = null, Azure.Compute.Batch.BatchJobSpecification jobSpecification = null, System.Collections.Generic.IEnumerable metadata = null) { throw null; } + public static Azure.Compute.Batch.BatchNode BatchNode(string id = null, string url = null, Azure.Compute.Batch.BatchNodeState? state = default(Azure.Compute.Batch.BatchNodeState?), Azure.Compute.Batch.SchedulingState? schedulingState = default(Azure.Compute.Batch.SchedulingState?), System.DateTimeOffset? stateTransitionTime = default(System.DateTimeOffset?), System.DateTimeOffset? lastBootTime = default(System.DateTimeOffset?), System.DateTimeOffset? allocationTime = default(System.DateTimeOffset?), string ipAddress = null, string affinityId = null, string vmSize = null, int? totalTasksRun = default(int?), int? runningTasksCount = default(int?), int? runningTaskSlotsCount = default(int?), int? totalTasksSucceeded = default(int?), System.Collections.Generic.IEnumerable recentTasks = null, Azure.Compute.Batch.BatchStartTask startTask = null, Azure.Compute.Batch.BatchStartTaskInfo startTaskInfo = null, System.Collections.Generic.IEnumerable errors = null, bool? isDedicated = default(bool?), Azure.Compute.Batch.BatchNodeEndpointConfiguration endpointConfiguration = null, Azure.Compute.Batch.BatchNodeAgentInfo nodeAgentInfo = null, Azure.Compute.Batch.VirtualMachineInfo virtualMachineInfo = null) { throw null; } + public static Azure.Compute.Batch.BatchNodeAgentInfo BatchNodeAgentInfo(string version = null, System.DateTimeOffset lastUpdateTime = default(System.DateTimeOffset)) { throw null; } + public static Azure.Compute.Batch.BatchNodeCounts BatchNodeCounts(int creating = 0, int idle = 0, int offline = 0, int preempted = 0, int rebooting = 0, int reimaging = 0, int running = 0, int starting = 0, int startTaskFailed = 0, int leavingPool = 0, int unknown = 0, int unusable = 0, int waitingForStartTask = 0, int total = 0, int upgradingOs = 0) { throw null; } + public static Azure.Compute.Batch.BatchNodeEndpointConfiguration BatchNodeEndpointConfiguration(System.Collections.Generic.IEnumerable inboundEndpoints = null) { throw null; } + public static Azure.Compute.Batch.BatchNodeError BatchNodeError(string code = null, string message = null, System.Collections.Generic.IEnumerable errorDetails = null) { throw null; } + public static Azure.Compute.Batch.BatchNodeFile BatchNodeFile(string name = null, string url = null, bool? isDirectory = default(bool?), Azure.Compute.Batch.FileProperties properties = null) { throw null; } + public static Azure.Compute.Batch.BatchNodeRemoteLoginSettings BatchNodeRemoteLoginSettings(string remoteLoginIpAddress = null, int remoteLoginPort = 0) { throw null; } + public static Azure.Compute.Batch.BatchNodeUserCreateContent BatchNodeUserCreateContent(string name = null, bool? isAdmin = default(bool?), System.DateTimeOffset? 
expiryTime = default(System.DateTimeOffset?), string password = null, string sshPublicKey = null) { throw null; } + public static Azure.Compute.Batch.BatchNodeVMExtension BatchNodeVMExtension(string provisioningState = null, Azure.Compute.Batch.VMExtension vmExtension = null, Azure.Compute.Batch.VMExtensionInstanceView instanceView = null) { throw null; } + public static Azure.Compute.Batch.BatchPool BatchPool(string id = null, string displayName = null, string url = null, string eTag = null, System.DateTimeOffset? lastModified = default(System.DateTimeOffset?), System.DateTimeOffset? creationTime = default(System.DateTimeOffset?), Azure.Compute.Batch.BatchPoolState? state = default(Azure.Compute.Batch.BatchPoolState?), System.DateTimeOffset? stateTransitionTime = default(System.DateTimeOffset?), Azure.Compute.Batch.AllocationState? allocationState = default(Azure.Compute.Batch.AllocationState?), System.DateTimeOffset? allocationStateTransitionTime = default(System.DateTimeOffset?), string vmSize = null, Azure.Compute.Batch.VirtualMachineConfiguration virtualMachineConfiguration = null, System.TimeSpan? resizeTimeout = default(System.TimeSpan?), System.Collections.Generic.IEnumerable resizeErrors = null, System.Collections.Generic.IReadOnlyDictionary resourceTags = null, int? currentDedicatedNodes = default(int?), int? currentLowPriorityNodes = default(int?), int? targetDedicatedNodes = default(int?), int? targetLowPriorityNodes = default(int?), bool? enableAutoScale = default(bool?), string autoScaleFormula = null, System.TimeSpan? autoScaleEvaluationInterval = default(System.TimeSpan?), Azure.Compute.Batch.AutoScaleRun autoScaleRun = null, bool? enableInterNodeCommunication = default(bool?), Azure.Compute.Batch.NetworkConfiguration networkConfiguration = null, Azure.Compute.Batch.BatchStartTask startTask = null, System.Collections.Generic.IEnumerable applicationPackageReferences = null, int? taskSlotsPerNode = default(int?), Azure.Compute.Batch.BatchTaskSchedulingPolicy taskSchedulingPolicy = null, System.Collections.Generic.IEnumerable userAccounts = null, System.Collections.Generic.IEnumerable metadata = null, Azure.Compute.Batch.BatchPoolStatistics stats = null, System.Collections.Generic.IEnumerable mountConfiguration = null, Azure.Compute.Batch.BatchPoolIdentity identity = null, Azure.Compute.Batch.BatchNodeCommunicationMode? targetNodeCommunicationMode = default(Azure.Compute.Batch.BatchNodeCommunicationMode?), Azure.Compute.Batch.BatchNodeCommunicationMode? currentNodeCommunicationMode = default(Azure.Compute.Batch.BatchNodeCommunicationMode?), Azure.Compute.Batch.UpgradePolicy upgradePolicy = null) { throw null; } + public static Azure.Compute.Batch.BatchPoolCreateContent BatchPoolCreateContent(string id = null, string displayName = null, string vmSize = null, Azure.Compute.Batch.VirtualMachineConfiguration virtualMachineConfiguration = null, System.TimeSpan? resizeTimeout = default(System.TimeSpan?), System.Collections.Generic.IDictionary resourceTags = null, int? targetDedicatedNodes = default(int?), int? targetLowPriorityNodes = default(int?), bool? enableAutoScale = default(bool?), string autoScaleFormula = null, System.TimeSpan? autoScaleEvaluationInterval = default(System.TimeSpan?), bool? enableInterNodeCommunication = default(bool?), Azure.Compute.Batch.NetworkConfiguration networkConfiguration = null, Azure.Compute.Batch.BatchStartTask startTask = null, System.Collections.Generic.IEnumerable applicationPackageReferences = null, int? 
taskSlotsPerNode = default(int?), Azure.Compute.Batch.BatchTaskSchedulingPolicy taskSchedulingPolicy = null, System.Collections.Generic.IEnumerable userAccounts = null, System.Collections.Generic.IEnumerable metadata = null, System.Collections.Generic.IEnumerable mountConfiguration = null, Azure.Compute.Batch.BatchNodeCommunicationMode? targetNodeCommunicationMode = default(Azure.Compute.Batch.BatchNodeCommunicationMode?), Azure.Compute.Batch.UpgradePolicy upgradePolicy = null) { throw null; } + public static Azure.Compute.Batch.BatchPoolIdentity BatchPoolIdentity(Azure.Compute.Batch.BatchPoolIdentityType type = default(Azure.Compute.Batch.BatchPoolIdentityType), System.Collections.Generic.IEnumerable userAssignedIdentities = null) { throw null; } + public static Azure.Compute.Batch.BatchPoolNodeCounts BatchPoolNodeCounts(string poolId = null, Azure.Compute.Batch.BatchNodeCounts dedicated = null, Azure.Compute.Batch.BatchNodeCounts lowPriority = null) { throw null; } + public static Azure.Compute.Batch.BatchPoolResourceStatistics BatchPoolResourceStatistics(System.DateTimeOffset startTime = default(System.DateTimeOffset), System.DateTimeOffset lastUpdateTime = default(System.DateTimeOffset), float avgCpuPercentage = 0f, float avgMemoryGiB = 0f, float peakMemoryGiB = 0f, float avgDiskGiB = 0f, float peakDiskGiB = 0f, long diskReadIOps = (long)0, long diskWriteIOps = (long)0, float diskReadGiB = 0f, float diskWriteGiB = 0f, float networkReadGiB = 0f, float networkWriteGiB = 0f) { throw null; } + public static Azure.Compute.Batch.BatchPoolStatistics BatchPoolStatistics(string url = null, System.DateTimeOffset startTime = default(System.DateTimeOffset), System.DateTimeOffset lastUpdateTime = default(System.DateTimeOffset), Azure.Compute.Batch.BatchPoolUsageStatistics usageStats = null, Azure.Compute.Batch.BatchPoolResourceStatistics resourceStats = null) { throw null; } + public static Azure.Compute.Batch.BatchPoolUsageMetrics BatchPoolUsageMetrics(string poolId = null, System.DateTimeOffset startTime = default(System.DateTimeOffset), System.DateTimeOffset endTime = default(System.DateTimeOffset), string vmSize = null, float totalCoreHours = 0f) { throw null; } + public static Azure.Compute.Batch.BatchPoolUsageStatistics BatchPoolUsageStatistics(System.DateTimeOffset startTime = default(System.DateTimeOffset), System.DateTimeOffset lastUpdateTime = default(System.DateTimeOffset), System.TimeSpan dedicatedCoreTime = default(System.TimeSpan)) { throw null; } + public static Azure.Compute.Batch.BatchStartTaskInfo BatchStartTaskInfo(Azure.Compute.Batch.BatchStartTaskState state = default(Azure.Compute.Batch.BatchStartTaskState), System.DateTimeOffset startTime = default(System.DateTimeOffset), System.DateTimeOffset? endTime = default(System.DateTimeOffset?), int? exitCode = default(int?), Azure.Compute.Batch.BatchTaskContainerExecutionInfo containerInfo = null, Azure.Compute.Batch.BatchTaskFailureInfo failureInfo = null, int retryCount = 0, System.DateTimeOffset? lastRetryTime = default(System.DateTimeOffset?), Azure.Compute.Batch.BatchTaskExecutionResult? result = default(Azure.Compute.Batch.BatchTaskExecutionResult?)) { throw null; } + public static Azure.Compute.Batch.BatchSubtask BatchSubtask(int? id = default(int?), Azure.Compute.Batch.BatchNodeInfo nodeInfo = null, System.DateTimeOffset? startTime = default(System.DateTimeOffset?), System.DateTimeOffset? endTime = default(System.DateTimeOffset?), int? 
exitCode = default(int?), Azure.Compute.Batch.BatchTaskContainerExecutionInfo containerInfo = null, Azure.Compute.Batch.BatchTaskFailureInfo failureInfo = null, Azure.Compute.Batch.BatchSubtaskState? state = default(Azure.Compute.Batch.BatchSubtaskState?), System.DateTimeOffset? stateTransitionTime = default(System.DateTimeOffset?), Azure.Compute.Batch.BatchSubtaskState? previousState = default(Azure.Compute.Batch.BatchSubtaskState?), System.DateTimeOffset? previousStateTransitionTime = default(System.DateTimeOffset?), Azure.Compute.Batch.BatchTaskExecutionResult? result = default(Azure.Compute.Batch.BatchTaskExecutionResult?)) { throw null; } + public static Azure.Compute.Batch.BatchSupportedImage BatchSupportedImage(string nodeAgentSkuId = null, Azure.Compute.Batch.ImageReference imageReference = null, Azure.Compute.Batch.OSType osType = default(Azure.Compute.Batch.OSType), System.Collections.Generic.IEnumerable capabilities = null, System.DateTimeOffset? batchSupportEndOfLife = default(System.DateTimeOffset?), Azure.Compute.Batch.ImageVerificationType verificationType = default(Azure.Compute.Batch.ImageVerificationType)) { throw null; } + public static Azure.Compute.Batch.BatchTask BatchTask(string id = null, string displayName = null, string url = null, string eTag = null, System.DateTimeOffset? lastModified = default(System.DateTimeOffset?), System.DateTimeOffset? creationTime = default(System.DateTimeOffset?), Azure.Compute.Batch.ExitConditions exitConditions = null, Azure.Compute.Batch.BatchTaskState? state = default(Azure.Compute.Batch.BatchTaskState?), System.DateTimeOffset? stateTransitionTime = default(System.DateTimeOffset?), Azure.Compute.Batch.BatchTaskState? previousState = default(Azure.Compute.Batch.BatchTaskState?), System.DateTimeOffset? previousStateTransitionTime = default(System.DateTimeOffset?), string commandLine = null, Azure.Compute.Batch.BatchTaskContainerSettings containerSettings = null, System.Collections.Generic.IEnumerable resourceFiles = null, System.Collections.Generic.IEnumerable outputFiles = null, System.Collections.Generic.IEnumerable environmentSettings = null, Azure.Compute.Batch.AffinityInfo affinityInfo = null, Azure.Compute.Batch.BatchTaskConstraints constraints = null, int? requiredSlots = default(int?), Azure.Compute.Batch.UserIdentity userIdentity = null, Azure.Compute.Batch.BatchTaskExecutionInfo executionInfo = null, Azure.Compute.Batch.BatchNodeInfo nodeInfo = null, Azure.Compute.Batch.MultiInstanceSettings multiInstanceSettings = null, Azure.Compute.Batch.BatchTaskStatistics stats = null, Azure.Compute.Batch.BatchTaskDependencies dependsOn = null, System.Collections.Generic.IEnumerable applicationPackageReferences = null, Azure.Compute.Batch.AuthenticationTokenSettings authenticationTokenSettings = null) { throw null; } + public static Azure.Compute.Batch.BatchTaskAddCollectionResult BatchTaskAddCollectionResult(System.Collections.Generic.IEnumerable value = null) { throw null; } + public static Azure.Compute.Batch.BatchTaskAddResult BatchTaskAddResult(Azure.Compute.Batch.BatchTaskAddStatus status = default(Azure.Compute.Batch.BatchTaskAddStatus), string taskId = null, string eTag = null, System.DateTimeOffset? 
lastModified = default(System.DateTimeOffset?), string location = null, Azure.Compute.Batch.BatchError error = null) { throw null; } + public static Azure.Compute.Batch.BatchTaskCounts BatchTaskCounts(int active = 0, int running = 0, int completed = 0, int succeeded = 0, int failed = 0) { throw null; } + public static Azure.Compute.Batch.BatchTaskCountsResult BatchTaskCountsResult(Azure.Compute.Batch.BatchTaskCounts taskCounts = null, Azure.Compute.Batch.BatchTaskSlotCounts taskSlotCounts = null) { throw null; } + public static Azure.Compute.Batch.BatchTaskCreateContent BatchTaskCreateContent(string id = null, string displayName = null, Azure.Compute.Batch.ExitConditions exitConditions = null, string commandLine = null, Azure.Compute.Batch.BatchTaskContainerSettings containerSettings = null, System.Collections.Generic.IEnumerable resourceFiles = null, System.Collections.Generic.IEnumerable outputFiles = null, System.Collections.Generic.IEnumerable environmentSettings = null, Azure.Compute.Batch.AffinityInfo affinityInfo = null, Azure.Compute.Batch.BatchTaskConstraints constraints = null, int? requiredSlots = default(int?), Azure.Compute.Batch.UserIdentity userIdentity = null, Azure.Compute.Batch.MultiInstanceSettings multiInstanceSettings = null, Azure.Compute.Batch.BatchTaskDependencies dependsOn = null, System.Collections.Generic.IEnumerable applicationPackageReferences = null, Azure.Compute.Batch.AuthenticationTokenSettings authenticationTokenSettings = null) { throw null; } + public static Azure.Compute.Batch.BatchTaskInfo BatchTaskInfo(string taskUrl = null, string jobId = null, string taskId = null, int? subtaskId = default(int?), Azure.Compute.Batch.BatchTaskState taskState = default(Azure.Compute.Batch.BatchTaskState), Azure.Compute.Batch.BatchTaskExecutionInfo executionInfo = null) { throw null; } + public static Azure.Compute.Batch.BatchTaskSlotCounts BatchTaskSlotCounts(int active = 0, int running = 0, int completed = 0, int succeeded = 0, int failed = 0) { throw null; } + public static Azure.Compute.Batch.FileProperties FileProperties(System.DateTimeOffset? creationTime = default(System.DateTimeOffset?), System.DateTimeOffset lastModified = default(System.DateTimeOffset), long contentLength = (long)0, string contentType = null, string fileMode = null) { throw null; } + public static Azure.Compute.Batch.ImageReference ImageReference(string publisher = null, string offer = null, string sku = null, string version = null, string virtualMachineImageId = null, string exactVersion = null) { throw null; } + public static Azure.Compute.Batch.InboundEndpoint InboundEndpoint(string name = null, Azure.Compute.Batch.InboundEndpointProtocol protocol = default(Azure.Compute.Batch.InboundEndpointProtocol), string publicIpAddress = null, string publicFQDN = null, int frontendPort = 0, int backendPort = 0) { throw null; } + public static Azure.Compute.Batch.InstanceViewStatus InstanceViewStatus(string code = null, string displayStatus = null, Azure.Compute.Batch.StatusLevelTypes? level = default(Azure.Compute.Batch.StatusLevelTypes?), string message = null, System.DateTimeOffset? 
time = default(System.DateTimeOffset?)) { throw null; } + public static Azure.Compute.Batch.ResizeError ResizeError(string code = null, string message = null, System.Collections.Generic.IEnumerable values = null) { throw null; } + public static Azure.Compute.Batch.UploadBatchServiceLogsContent UploadBatchServiceLogsContent(string containerUrl = null, System.DateTimeOffset startTime = default(System.DateTimeOffset), System.DateTimeOffset? endTime = default(System.DateTimeOffset?), Azure.Compute.Batch.BatchNodeIdentityReference identityReference = null) { throw null; } + public static Azure.Compute.Batch.UploadBatchServiceLogsResult UploadBatchServiceLogsResult(string virtualDirectoryName = null, int numberOfFilesUploaded = 0) { throw null; } + public static Azure.Compute.Batch.UserAssignedIdentity UserAssignedIdentity(string resourceId = null, string clientId = null, string principalId = null) { throw null; } + public static Azure.Compute.Batch.VirtualMachineInfo VirtualMachineInfo(Azure.Compute.Batch.ImageReference imageReference = null, string scaleSetVmResourceId = null) { throw null; } + public static Azure.Compute.Batch.VMExtensionInstanceView VMExtensionInstanceView(string name = null, System.Collections.Generic.IEnumerable statuses = null, System.Collections.Generic.IEnumerable subStatuses = null) { throw null; } + } + public partial class ContainerConfiguration : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public ContainerConfiguration(Azure.Compute.Batch.ContainerType type) { } + public System.Collections.Generic.IList ContainerImageNames { get { throw null; } } + public System.Collections.Generic.IList ContainerRegistries { get { throw null; } } + public Azure.Compute.Batch.ContainerType Type { get { throw null; } set { } } + Azure.Compute.Batch.ContainerConfiguration System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.ContainerConfiguration System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class ContainerRegistryReference : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public ContainerRegistryReference() { } + public Azure.Compute.Batch.BatchNodeIdentityReference IdentityReference { get { throw null; } set { } } + public string Password { get { throw null; } set { } } + public string RegistryServer { get { throw null; } set { } } + public string Username { get { throw null; } set { } } + Azure.Compute.Batch.ContainerRegistryReference System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions 
+    [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)]
+    public readonly partial struct ContainerType : System.IEquatable<Azure.Compute.Batch.ContainerType>
+    {
+        private readonly object _dummy;
+        private readonly int _dummyPrimitive;
+        public ContainerType(string value) { throw null; }
+        public static Azure.Compute.Batch.ContainerType CriCompatible { get { throw null; } }
+        public static Azure.Compute.Batch.ContainerType DockerCompatible { get { throw null; } }
+        public bool Equals(Azure.Compute.Batch.ContainerType other) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override bool Equals(object obj) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override int GetHashCode() { throw null; }
+        public static bool operator ==(Azure.Compute.Batch.ContainerType left, Azure.Compute.Batch.ContainerType right) { throw null; }
+        public static implicit operator Azure.Compute.Batch.ContainerType (string value) { throw null; }
+        public static bool operator !=(Azure.Compute.Batch.ContainerType left, Azure.Compute.Batch.ContainerType right) { throw null; }
+        public override string ToString() { throw null; }
+    }
+    [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)]
+    public readonly partial struct ContainerWorkingDirectory : System.IEquatable<Azure.Compute.Batch.ContainerWorkingDirectory>
+    {
+        private readonly object _dummy;
+        private readonly int _dummyPrimitive;
+        public ContainerWorkingDirectory(string value) { throw null; }
+        public static Azure.Compute.Batch.ContainerWorkingDirectory ContainerImageDefault { get { throw null; } }
+        public static Azure.Compute.Batch.ContainerWorkingDirectory TaskWorkingDirectory { get { throw null; } }
+        public bool Equals(Azure.Compute.Batch.ContainerWorkingDirectory other) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override bool Equals(object obj) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override int GetHashCode() { throw null; }
+        public static bool operator ==(Azure.Compute.Batch.ContainerWorkingDirectory left, Azure.Compute.Batch.ContainerWorkingDirectory right) { throw null; }
+        public static implicit operator Azure.Compute.Batch.ContainerWorkingDirectory (string value) { throw null; }
+        public static bool operator !=(Azure.Compute.Batch.ContainerWorkingDirectory left, Azure.Compute.Batch.ContainerWorkingDirectory right) { throw null; }
+        public override string ToString() { throw null; }
+    }
+    public partial class DataDisk : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.DataDisk>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.DataDisk>
+    {
+        public DataDisk(int logicalUnitNumber, int diskSizeGb) { }
+        public Azure.Compute.Batch.CachingType? Caching { get { throw null; } set { } }
+        public int DiskSizeGb { get { throw null; } set { } }
+        public int LogicalUnitNumber { get { throw null; } set { } }
+        public Azure.Compute.Batch.StorageAccountType? StorageAccountType { get { throw null; } set { } }
+        Azure.Compute.Batch.DataDisk System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.DataDisk>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.DataDisk>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.DataDisk System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.DataDisk>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.DataDisk>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.DataDisk>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
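// Editor's note: usage sketch only. The string-backed structs in this listing (ContainerType,
// ContainerWorkingDirectory, and the like) convert implicitly from string, and DataDisk is built
// from its LUN and size. The string values below are illustrative; CachingType and
// StorageAccountType members are declared elsewhere in this file, not in this excerpt.
//   var dataDisk = new Azure.Compute.Batch.DataDisk(logicalUnitNumber: 0, diskSizeGb: 128)
//   {
//       Caching = "readwrite",              // via the implicit string conversion
//       StorageAccountType = "premium_lrs", // value name is illustrative
//   };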
+    [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)]
+    public readonly partial struct DependencyAction : System.IEquatable<Azure.Compute.Batch.DependencyAction>
+    {
+        private readonly object _dummy;
+        private readonly int _dummyPrimitive;
+        public DependencyAction(string value) { throw null; }
+        public static Azure.Compute.Batch.DependencyAction Block { get { throw null; } }
+        public static Azure.Compute.Batch.DependencyAction Satisfy { get { throw null; } }
+        public bool Equals(Azure.Compute.Batch.DependencyAction other) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override bool Equals(object obj) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override int GetHashCode() { throw null; }
+        public static bool operator ==(Azure.Compute.Batch.DependencyAction left, Azure.Compute.Batch.DependencyAction right) { throw null; }
+        public static implicit operator Azure.Compute.Batch.DependencyAction (string value) { throw null; }
+        public static bool operator !=(Azure.Compute.Batch.DependencyAction left, Azure.Compute.Batch.DependencyAction right) { throw null; }
+        public override string ToString() { throw null; }
+    }
+    [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)]
+    public readonly partial struct DiffDiskPlacement : System.IEquatable<Azure.Compute.Batch.DiffDiskPlacement>
+    {
+        private readonly object _dummy;
+        private readonly int _dummyPrimitive;
+        public DiffDiskPlacement(string value) { throw null; }
+        public static Azure.Compute.Batch.DiffDiskPlacement CacheDisk { get { throw null; } }
+        public bool Equals(Azure.Compute.Batch.DiffDiskPlacement other) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override bool Equals(object obj) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override int GetHashCode() { throw null; }
+        public static bool operator ==(Azure.Compute.Batch.DiffDiskPlacement left, Azure.Compute.Batch.DiffDiskPlacement right) { throw null; }
+        public static implicit operator Azure.Compute.Batch.DiffDiskPlacement (string value) { throw null; }
+        public static bool operator !=(Azure.Compute.Batch.DiffDiskPlacement left, Azure.Compute.Batch.DiffDiskPlacement right) { throw null; }
+        public override string ToString() { throw null; }
+    }
+    public partial class DiffDiskSettings : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.DiffDiskSettings>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.DiffDiskSettings>
+    {
+        public DiffDiskSettings() { }
+        public Azure.Compute.Batch.DiffDiskPlacement? Placement { get { throw null; } set { } }
+        Azure.Compute.Batch.DiffDiskSettings System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.DiffDiskSettings>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.DiffDiskSettings>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.DiffDiskSettings System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.DiffDiskSettings>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.DiffDiskSettings>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.DiffDiskSettings>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)]
+    public readonly partial struct DisableBatchJobOption : System.IEquatable<Azure.Compute.Batch.DisableBatchJobOption>
+    {
+        private readonly object _dummy;
+        private readonly int _dummyPrimitive;
+        public DisableBatchJobOption(string value) { throw null; }
+        public static Azure.Compute.Batch.DisableBatchJobOption Requeue { get { throw null; } }
+        public static Azure.Compute.Batch.DisableBatchJobOption Terminate { get { throw null; } }
+        public static Azure.Compute.Batch.DisableBatchJobOption Wait { get { throw null; } }
+        public bool Equals(Azure.Compute.Batch.DisableBatchJobOption other) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override bool Equals(object obj) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override int GetHashCode() { throw null; }
+        public static bool operator ==(Azure.Compute.Batch.DisableBatchJobOption left, Azure.Compute.Batch.DisableBatchJobOption right) { throw null; }
+        public static implicit operator Azure.Compute.Batch.DisableBatchJobOption (string value) { throw null; }
+        public static bool operator !=(Azure.Compute.Batch.DisableBatchJobOption left, Azure.Compute.Batch.DisableBatchJobOption right) { throw null; }
+        public override string ToString() { throw null; }
+    }
+    public partial class DiskEncryptionConfiguration : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.DiskEncryptionConfiguration>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.DiskEncryptionConfiguration>
+    {
+        public DiskEncryptionConfiguration() { }
+        public System.Collections.Generic.IList<Azure.Compute.Batch.DiskEncryptionTarget> Targets { get { throw null; } }
+        Azure.Compute.Batch.DiskEncryptionConfiguration System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.DiskEncryptionConfiguration>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.DiskEncryptionConfiguration>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.DiskEncryptionConfiguration System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.DiskEncryptionConfiguration>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.DiskEncryptionConfiguration>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.DiskEncryptionConfiguration>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
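// Editor's note: usage sketch only. DiskEncryptionConfiguration carries a list of encryption
// targets; the DiskEncryptionTarget struct it uses is declared immediately below.
//   var encryption = new Azure.Compute.Batch.DiskEncryptionConfiguration();
//   encryption.Targets.Add(Azure.Compute.Batch.DiskEncryptionTarget.OsDisk);
//   encryption.Targets.Add(Azure.Compute.Batch.DiskEncryptionTarget.TemporaryDisk);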
+    [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)]
+    public readonly partial struct DiskEncryptionTarget : System.IEquatable<Azure.Compute.Batch.DiskEncryptionTarget>
+    {
+        private readonly object _dummy;
+        private readonly int _dummyPrimitive;
+        public DiskEncryptionTarget(string value) { throw null; }
+        public static Azure.Compute.Batch.DiskEncryptionTarget OsDisk { get { throw null; } }
+        public static Azure.Compute.Batch.DiskEncryptionTarget TemporaryDisk { get { throw null; } }
+        public bool Equals(Azure.Compute.Batch.DiskEncryptionTarget other) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override bool Equals(object obj) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override int GetHashCode() { throw null; }
+        public static bool operator ==(Azure.Compute.Batch.DiskEncryptionTarget left, Azure.Compute.Batch.DiskEncryptionTarget right) { throw null; }
+        public static implicit operator Azure.Compute.Batch.DiskEncryptionTarget (string value) { throw null; }
+        public static bool operator !=(Azure.Compute.Batch.DiskEncryptionTarget left, Azure.Compute.Batch.DiskEncryptionTarget right) { throw null; }
+        public override string ToString() { throw null; }
+    }
+    [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)]
+    public readonly partial struct DynamicVNetAssignmentScope : System.IEquatable<Azure.Compute.Batch.DynamicVNetAssignmentScope>
+    {
+        private readonly object _dummy;
+        private readonly int _dummyPrimitive;
+        public DynamicVNetAssignmentScope(string value) { throw null; }
+        public static Azure.Compute.Batch.DynamicVNetAssignmentScope Job { get { throw null; } }
+        public static Azure.Compute.Batch.DynamicVNetAssignmentScope None { get { throw null; } }
+        public bool Equals(Azure.Compute.Batch.DynamicVNetAssignmentScope other) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override bool Equals(object obj) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override int GetHashCode() { throw null; }
+        public static bool operator ==(Azure.Compute.Batch.DynamicVNetAssignmentScope left, Azure.Compute.Batch.DynamicVNetAssignmentScope right) { throw null; }
+        public static implicit operator Azure.Compute.Batch.DynamicVNetAssignmentScope (string value) { throw null; }
+        public static bool operator !=(Azure.Compute.Batch.DynamicVNetAssignmentScope left, Azure.Compute.Batch.DynamicVNetAssignmentScope right) { throw null; }
+        public override string ToString() { throw null; }
+    }
+    [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)]
+    public readonly partial struct ElevationLevel : System.IEquatable<Azure.Compute.Batch.ElevationLevel>
+    {
+        private readonly object _dummy;
+        private readonly int _dummyPrimitive;
+        public ElevationLevel(string value) { throw null; }
+        public static Azure.Compute.Batch.ElevationLevel Admin { get { throw null; } }
+        public static Azure.Compute.Batch.ElevationLevel NonAdmin { get { throw null; } }
+        public bool Equals(Azure.Compute.Batch.ElevationLevel other) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override bool Equals(object obj) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override int GetHashCode() { throw null; }
+        public static bool operator ==(Azure.Compute.Batch.ElevationLevel left, Azure.Compute.Batch.ElevationLevel right) { throw null; }
+        public static implicit operator Azure.Compute.Batch.ElevationLevel (string value) { throw null; }
+        public static bool operator !=(Azure.Compute.Batch.ElevationLevel left, Azure.Compute.Batch.ElevationLevel right) { throw null; }
+        public override string ToString() { throw null; }
+    }
+    public partial class EnvironmentSetting : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.EnvironmentSetting>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.EnvironmentSetting>
+    {
+        public EnvironmentSetting(string name) { }
+        public string Name { get { throw null; } set { } }
+        public string Value { get { throw null; } set { } }
+        Azure.Compute.Batch.EnvironmentSetting System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.EnvironmentSetting>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.EnvironmentSetting>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.EnvironmentSetting System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.EnvironmentSetting>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.EnvironmentSetting>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.EnvironmentSetting>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)]
+    public readonly partial struct ErrorCategory : System.IEquatable<Azure.Compute.Batch.ErrorCategory>
+    {
+        private readonly object _dummy;
+        private readonly int _dummyPrimitive;
+        public ErrorCategory(string value) { throw null; }
+        public static Azure.Compute.Batch.ErrorCategory ServerError { get { throw null; } }
+        public static Azure.Compute.Batch.ErrorCategory UserError { get { throw null; } }
+        public bool Equals(Azure.Compute.Batch.ErrorCategory other) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override bool Equals(object obj) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override int GetHashCode() { throw null; }
+        public static bool operator ==(Azure.Compute.Batch.ErrorCategory left, Azure.Compute.Batch.ErrorCategory right) { throw null; }
+        public static implicit operator Azure.Compute.Batch.ErrorCategory (string value) { throw null; }
+        public static bool operator !=(Azure.Compute.Batch.ErrorCategory left, Azure.Compute.Batch.ErrorCategory right) { throw null; }
+        public override string ToString() { throw null; }
+    }
+    public partial class ExitCodeMapping : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.ExitCodeMapping>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.ExitCodeMapping>
+    {
+        public ExitCodeMapping(int code, Azure.Compute.Batch.ExitOptions exitOptions) { }
+        public int Code { get { throw null; } set { } }
+        public Azure.Compute.Batch.ExitOptions ExitOptions { get { throw null; } set { } }
+        Azure.Compute.Batch.ExitCodeMapping System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.ExitCodeMapping>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.ExitCodeMapping>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.ExitCodeMapping System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.ExitCodeMapping>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.ExitCodeMapping>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.ExitCodeMapping>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
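// Editor's note: usage sketch only, showing the shape of EnvironmentSetting as declared above
// (required name, optional value); the variable name and value are placeholders.
//   var setting = new Azure.Compute.Batch.EnvironmentSetting("MY_TASK_FLAG") { Value = "1" };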
+    public partial class ExitCodeRangeMapping : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.ExitCodeRangeMapping>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.ExitCodeRangeMapping>
+    {
+        public ExitCodeRangeMapping(int start, int end, Azure.Compute.Batch.ExitOptions exitOptions) { }
+        public int End { get { throw null; } set { } }
+        public Azure.Compute.Batch.ExitOptions ExitOptions { get { throw null; } set { } }
+        public int Start { get { throw null; } set { } }
+        Azure.Compute.Batch.ExitCodeRangeMapping System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.ExitCodeRangeMapping>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.ExitCodeRangeMapping>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.ExitCodeRangeMapping System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.ExitCodeRangeMapping>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.ExitCodeRangeMapping>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.ExitCodeRangeMapping>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    public partial class ExitConditions : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.ExitConditions>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.ExitConditions>
+    {
+        public ExitConditions() { }
+        public Azure.Compute.Batch.ExitOptions Default { get { throw null; } set { } }
+        public System.Collections.Generic.IList<Azure.Compute.Batch.ExitCodeRangeMapping> ExitCodeRanges { get { throw null; } }
+        public System.Collections.Generic.IList<Azure.Compute.Batch.ExitCodeMapping> ExitCodes { get { throw null; } }
+        public Azure.Compute.Batch.ExitOptions FileUploadError { get { throw null; } set { } }
+        public Azure.Compute.Batch.ExitOptions PreProcessingError { get { throw null; } set { } }
+        Azure.Compute.Batch.ExitConditions System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.ExitConditions>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.ExitConditions>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.ExitConditions System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.ExitConditions>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.ExitConditions>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.ExitConditions>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
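// Editor's note: usage sketch only. ExitConditions maps task exit codes, singly or in ranges, to
// ExitOptions; BatchJobAction is declared elsewhere in this file, and its member name below is an
// assumption for illustration.
//   var exitConditions = new Azure.Compute.Batch.ExitConditions();
//   exitConditions.ExitCodes.Add(new Azure.Compute.Batch.ExitCodeMapping(1,
//       new Azure.Compute.Batch.ExitOptions { JobAction = Azure.Compute.Batch.BatchJobAction.Terminate }));
//   exitConditions.ExitCodeRanges.Add(new Azure.Compute.Batch.ExitCodeRangeMapping(2, 10,
//       new Azure.Compute.Batch.ExitOptions { DependencyAction = Azure.Compute.Batch.DependencyAction.Block }));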
+    public partial class ExitOptions : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.ExitOptions>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.ExitOptions>
+    {
+        public ExitOptions() { }
+        public Azure.Compute.Batch.DependencyAction? DependencyAction { get { throw null; } set { } }
+        public Azure.Compute.Batch.BatchJobAction? JobAction { get { throw null; } set { } }
+        Azure.Compute.Batch.ExitOptions System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.ExitOptions>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.ExitOptions>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.ExitOptions System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.ExitOptions>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.ExitOptions>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.ExitOptions>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    public partial class FileProperties : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.FileProperties>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.FileProperties>
+    {
+        internal FileProperties() { }
+        public long ContentLength { get { throw null; } }
+        public string ContentType { get { throw null; } }
+        public System.DateTimeOffset? CreationTime { get { throw null; } }
+        public string FileMode { get { throw null; } }
+        public System.DateTimeOffset LastModified { get { throw null; } }
+        Azure.Compute.Batch.FileProperties System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.FileProperties>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.FileProperties>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.FileProperties System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.FileProperties>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.FileProperties>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.FileProperties>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    public partial class HttpHeader : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.HttpHeader>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.HttpHeader>
+    {
+        public HttpHeader(string name) { }
+        public string Name { get { throw null; } set { } }
+        public string Value { get { throw null; } set { } }
+        Azure.Compute.Batch.HttpHeader System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.HttpHeader>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.HttpHeader>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.HttpHeader System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.HttpHeader>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.HttpHeader>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.HttpHeader>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    public partial class ImageReference : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.ImageReference>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.ImageReference>
+    {
+        public ImageReference() { }
+        public string ExactVersion { get { throw null; } }
+        public string Offer { get { throw null; } set { } }
+        public string Publisher { get { throw null; } set { } }
+        public string Sku { get { throw null; } set { } }
+        public string Version { get { throw null; } set { } }
+        public string VirtualMachineImageId { get { throw null; } set { } }
+        Azure.Compute.Batch.ImageReference System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.ImageReference>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.ImageReference>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.ImageReference System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.ImageReference>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.ImageReference>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.ImageReference>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
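// Editor's note: usage sketch only. A marketplace image reference for a pool; the four values are
// placeholders, and ExactVersion is get-only in the listing above, so it is presumably populated by
// the service rather than by callers.
//   var image = new Azure.Compute.Batch.ImageReference
//   {
//       Publisher = "canonical",
//       Offer = "0001-com-ubuntu-server-jammy",
//       Sku = "22_04-lts",
//       Version = "latest",
//   };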
+    [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)]
+    public readonly partial struct ImageVerificationType : System.IEquatable<Azure.Compute.Batch.ImageVerificationType>
+    {
+        private readonly object _dummy;
+        private readonly int _dummyPrimitive;
+        public ImageVerificationType(string value) { throw null; }
+        public static Azure.Compute.Batch.ImageVerificationType Unverified { get { throw null; } }
+        public static Azure.Compute.Batch.ImageVerificationType Verified { get { throw null; } }
+        public bool Equals(Azure.Compute.Batch.ImageVerificationType other) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override bool Equals(object obj) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override int GetHashCode() { throw null; }
+        public static bool operator ==(Azure.Compute.Batch.ImageVerificationType left, Azure.Compute.Batch.ImageVerificationType right) { throw null; }
+        public static implicit operator Azure.Compute.Batch.ImageVerificationType (string value) { throw null; }
+        public static bool operator !=(Azure.Compute.Batch.ImageVerificationType left, Azure.Compute.Batch.ImageVerificationType right) { throw null; }
+        public override string ToString() { throw null; }
+    }
+    public partial class InboundEndpoint : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.InboundEndpoint>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.InboundEndpoint>
+    {
+        internal InboundEndpoint() { }
+        public int BackendPort { get { throw null; } }
+        public int FrontendPort { get { throw null; } }
+        public string Name { get { throw null; } }
+        public Azure.Compute.Batch.InboundEndpointProtocol Protocol { get { throw null; } }
+        public string PublicFQDN { get { throw null; } }
+        public string PublicIpAddress { get { throw null; } }
+        Azure.Compute.Batch.InboundEndpoint System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.InboundEndpoint>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.InboundEndpoint>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.InboundEndpoint System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.InboundEndpoint>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.InboundEndpoint>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.InboundEndpoint>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)]
+    public readonly partial struct InboundEndpointProtocol : System.IEquatable<Azure.Compute.Batch.InboundEndpointProtocol>
+    {
+        private readonly object _dummy;
+        private readonly int _dummyPrimitive;
+        public InboundEndpointProtocol(string value) { throw null; }
+        public static Azure.Compute.Batch.InboundEndpointProtocol Tcp { get { throw null; } }
+        public static Azure.Compute.Batch.InboundEndpointProtocol Udp { get { throw null; } }
+        public bool Equals(Azure.Compute.Batch.InboundEndpointProtocol other) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override bool Equals(object obj) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override int GetHashCode() { throw null; }
+        public static bool operator ==(Azure.Compute.Batch.InboundEndpointProtocol left, Azure.Compute.Batch.InboundEndpointProtocol right) { throw null; }
+        public static implicit operator Azure.Compute.Batch.InboundEndpointProtocol (string value) { throw null; }
+        public static bool operator !=(Azure.Compute.Batch.InboundEndpointProtocol left, Azure.Compute.Batch.InboundEndpointProtocol right) { throw null; }
+        public override string ToString() { throw null; }
+    }
+    public partial class InboundNatPool : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.InboundNatPool>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.InboundNatPool>
+    {
+        public InboundNatPool(string name, Azure.Compute.Batch.InboundEndpointProtocol protocol, int backendPort, int frontendPortRangeStart, int frontendPortRangeEnd) { }
+        public int BackendPort { get { throw null; } set { } }
+        public int FrontendPortRangeEnd { get { throw null; } set { } }
+        public int FrontendPortRangeStart { get { throw null; } set { } }
+        public string Name { get { throw null; } set { } }
+        public System.Collections.Generic.IList<Azure.Compute.Batch.NetworkSecurityGroupRule> NetworkSecurityGroupRules { get { throw null; } }
+        public Azure.Compute.Batch.InboundEndpointProtocol Protocol { get { throw null; } set { } }
+        Azure.Compute.Batch.InboundNatPool System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.InboundNatPool>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.InboundNatPool>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.InboundNatPool System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.InboundNatPool>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.InboundNatPool>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.InboundNatPool>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    public partial class InstanceViewStatus : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.InstanceViewStatus>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.InstanceViewStatus>
+    {
+        internal InstanceViewStatus() { }
+        public string Code { get { throw null; } }
+        public string DisplayStatus { get { throw null; } }
+        public Azure.Compute.Batch.StatusLevelTypes? Level { get { throw null; } }
+        public string Message { get { throw null; } }
+        public System.DateTimeOffset? Time { get { throw null; } }
+        Azure.Compute.Batch.InstanceViewStatus System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.InstanceViewStatus>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.InstanceViewStatus>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.InstanceViewStatus System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.InstanceViewStatus>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.InstanceViewStatus>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.InstanceViewStatus>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
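// Editor's note: usage sketch only. An InboundNatPool forwards a frontend port range to one
// backend port on every node; NetworkSecurityGroupRule (declared further down in this listing)
// can restrict the allowed sources. The port numbers and CIDR below are placeholders.
//   var sshPool = new Azure.Compute.Batch.InboundNatPool("ssh",
//       Azure.Compute.Batch.InboundEndpointProtocol.Tcp,
//       backendPort: 22, frontendPortRangeStart: 15000, frontendPortRangeEnd: 15100);
//   sshPool.NetworkSecurityGroupRules.Add(new Azure.Compute.Batch.NetworkSecurityGroupRule(
//       150, Azure.Compute.Batch.NetworkSecurityGroupRuleAccess.Allow, "10.0.0.0/24"));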
+    [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)]
+    public readonly partial struct IpAddressProvisioningType : System.IEquatable<Azure.Compute.Batch.IpAddressProvisioningType>
+    {
+        private readonly object _dummy;
+        private readonly int _dummyPrimitive;
+        public IpAddressProvisioningType(string value) { throw null; }
+        public static Azure.Compute.Batch.IpAddressProvisioningType BatchManaged { get { throw null; } }
+        public static Azure.Compute.Batch.IpAddressProvisioningType NoPublicIpAddresses { get { throw null; } }
+        public static Azure.Compute.Batch.IpAddressProvisioningType UserManaged { get { throw null; } }
+        public bool Equals(Azure.Compute.Batch.IpAddressProvisioningType other) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override bool Equals(object obj) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override int GetHashCode() { throw null; }
+        public static bool operator ==(Azure.Compute.Batch.IpAddressProvisioningType left, Azure.Compute.Batch.IpAddressProvisioningType right) { throw null; }
+        public static implicit operator Azure.Compute.Batch.IpAddressProvisioningType (string value) { throw null; }
+        public static bool operator !=(Azure.Compute.Batch.IpAddressProvisioningType left, Azure.Compute.Batch.IpAddressProvisioningType right) { throw null; }
+        public override string ToString() { throw null; }
+    }
+    public partial class LinuxUserConfiguration : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.LinuxUserConfiguration>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.LinuxUserConfiguration>
+    {
+        public LinuxUserConfiguration() { }
+        public int? Gid { get { throw null; } set { } }
+        public string SshPrivateKey { get { throw null; } set { } }
+        public int? Uid { get { throw null; } set { } }
+        Azure.Compute.Batch.LinuxUserConfiguration System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.LinuxUserConfiguration>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.LinuxUserConfiguration>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.LinuxUserConfiguration System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.LinuxUserConfiguration>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.LinuxUserConfiguration>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.LinuxUserConfiguration>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)]
+    public readonly partial struct LoginMode : System.IEquatable<Azure.Compute.Batch.LoginMode>
+    {
+        private readonly object _dummy;
+        private readonly int _dummyPrimitive;
+        public LoginMode(string value) { throw null; }
+        public static Azure.Compute.Batch.LoginMode Batch { get { throw null; } }
+        public static Azure.Compute.Batch.LoginMode Interactive { get { throw null; } }
+        public bool Equals(Azure.Compute.Batch.LoginMode other) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override bool Equals(object obj) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override int GetHashCode() { throw null; }
+        public static bool operator ==(Azure.Compute.Batch.LoginMode left, Azure.Compute.Batch.LoginMode right) { throw null; }
+        public static implicit operator Azure.Compute.Batch.LoginMode (string value) { throw null; }
+        public static bool operator !=(Azure.Compute.Batch.LoginMode left, Azure.Compute.Batch.LoginMode right) { throw null; }
+        public override string ToString() { throw null; }
+    }
+    public partial class ManagedDisk : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.ManagedDisk>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.ManagedDisk>
+    {
+        public ManagedDisk(Azure.Compute.Batch.StorageAccountType storageAccountType) { }
+        public Azure.Compute.Batch.StorageAccountType StorageAccountType { get { throw null; } set { } }
+        Azure.Compute.Batch.ManagedDisk System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.ManagedDisk>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.ManagedDisk>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.ManagedDisk System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.ManagedDisk>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.ManagedDisk>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.ManagedDisk>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    public partial class MetadataItem : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.MetadataItem>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.MetadataItem>
+    {
+        public MetadataItem(string name, string value) { }
+        public string Name { get { throw null; } set { } }
+        public string Value { get { throw null; } set { } }
+        Azure.Compute.Batch.MetadataItem System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.MetadataItem>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.MetadataItem>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.MetadataItem System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.MetadataItem>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.MetadataItem>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.MetadataItem>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    public partial class MountConfiguration : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.MountConfiguration>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.MountConfiguration>
+    {
+        public MountConfiguration() { }
+        public Azure.Compute.Batch.AzureBlobFileSystemConfiguration AzureBlobFileSystemConfiguration { get { throw null; } set { } }
+        public Azure.Compute.Batch.AzureFileShareConfiguration AzureFileShareConfiguration { get { throw null; } set { } }
+        public Azure.Compute.Batch.CifsMountConfiguration CifsMountConfiguration { get { throw null; } set { } }
+        public Azure.Compute.Batch.NfsMountConfiguration NfsMountConfiguration { get { throw null; } set { } }
+        Azure.Compute.Batch.MountConfiguration System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.MountConfiguration>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.MountConfiguration>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.MountConfiguration System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.MountConfiguration>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.MountConfiguration>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.MountConfiguration>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    public partial class MultiInstanceSettings : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.MultiInstanceSettings>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.MultiInstanceSettings>
+    {
+        public MultiInstanceSettings(string coordinationCommandLine) { }
+        public System.Collections.Generic.IList<Azure.Compute.Batch.ResourceFile> CommonResourceFiles { get { throw null; } }
+        public string CoordinationCommandLine { get { throw null; } set { } }
+        public int? NumberOfInstances { get { throw null; } set { } }
+        Azure.Compute.Batch.MultiInstanceSettings System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.MultiInstanceSettings>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.MultiInstanceSettings>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.MultiInstanceSettings System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.MultiInstanceSettings>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.MultiInstanceSettings>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.MultiInstanceSettings>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
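// Editor's note: usage sketch only. MountConfiguration is a union-style wrapper: setting exactly
// one of its four mount properties appears to be the intent of the model shape (an assumption).
// NfsMountConfiguration is declared further down; the source path below is a placeholder.
//   var mount = new Azure.Compute.Batch.MountConfiguration
//   {
//       NfsMountConfiguration = new Azure.Compute.Batch.NfsMountConfiguration("10.0.0.4:/exports/data", "data")
//       {
//           MountOptions = "-o vers=4.1",
//       },
//   };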
+    public partial class NameValuePair : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.NameValuePair>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.NameValuePair>
+    {
+        public NameValuePair() { }
+        public string Name { get { throw null; } set { } }
+        public string Value { get { throw null; } set { } }
+        Azure.Compute.Batch.NameValuePair System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.NameValuePair>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.NameValuePair>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.NameValuePair System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.NameValuePair>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.NameValuePair>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.NameValuePair>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    public partial class NetworkConfiguration : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.NetworkConfiguration>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.NetworkConfiguration>
+    {
+        public NetworkConfiguration() { }
+        public Azure.Compute.Batch.DynamicVNetAssignmentScope? DynamicVNetAssignmentScope { get { throw null; } set { } }
+        public bool? EnableAcceleratedNetworking { get { throw null; } set { } }
+        public Azure.Compute.Batch.BatchPoolEndpointConfiguration EndpointConfiguration { get { throw null; } set { } }
+        public Azure.Compute.Batch.PublicIpAddressConfiguration PublicIpAddressConfiguration { get { throw null; } set { } }
+        public string SubnetId { get { throw null; } set { } }
+        Azure.Compute.Batch.NetworkConfiguration System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.NetworkConfiguration>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.NetworkConfiguration>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.NetworkConfiguration System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.NetworkConfiguration>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.NetworkConfiguration>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.NetworkConfiguration>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
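// Editor's note: usage sketch only. Pool-level networking combining a delegated subnet with
// accelerated networking; the subnet resource ID below is a placeholder.
//   var network = new Azure.Compute.Batch.NetworkConfiguration
//   {
//       SubnetId = "/subscriptions/<sub-id>/resourceGroups/<rg>/providers/Microsoft.Network/virtualNetworks/<vnet>/subnets/<subnet>",
//       EnableAcceleratedNetworking = true,
//   };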
+    public partial class NetworkSecurityGroupRule : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.NetworkSecurityGroupRule>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.NetworkSecurityGroupRule>
+    {
+        public NetworkSecurityGroupRule(int priority, Azure.Compute.Batch.NetworkSecurityGroupRuleAccess access, string sourceAddressPrefix) { }
+        public Azure.Compute.Batch.NetworkSecurityGroupRuleAccess Access { get { throw null; } set { } }
+        public int Priority { get { throw null; } set { } }
+        public string SourceAddressPrefix { get { throw null; } set { } }
+        public System.Collections.Generic.IList<string> SourcePortRanges { get { throw null; } }
+        Azure.Compute.Batch.NetworkSecurityGroupRule System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.NetworkSecurityGroupRule>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.NetworkSecurityGroupRule>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.NetworkSecurityGroupRule System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.NetworkSecurityGroupRule>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.NetworkSecurityGroupRule>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.NetworkSecurityGroupRule>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)]
+    public readonly partial struct NetworkSecurityGroupRuleAccess : System.IEquatable<Azure.Compute.Batch.NetworkSecurityGroupRuleAccess>
+    {
+        private readonly object _dummy;
+        private readonly int _dummyPrimitive;
+        public NetworkSecurityGroupRuleAccess(string value) { throw null; }
+        public static Azure.Compute.Batch.NetworkSecurityGroupRuleAccess Allow { get { throw null; } }
+        public static Azure.Compute.Batch.NetworkSecurityGroupRuleAccess Deny { get { throw null; } }
+        public bool Equals(Azure.Compute.Batch.NetworkSecurityGroupRuleAccess other) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override bool Equals(object obj) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override int GetHashCode() { throw null; }
+        public static bool operator ==(Azure.Compute.Batch.NetworkSecurityGroupRuleAccess left, Azure.Compute.Batch.NetworkSecurityGroupRuleAccess right) { throw null; }
+        public static implicit operator Azure.Compute.Batch.NetworkSecurityGroupRuleAccess (string value) { throw null; }
+        public static bool operator !=(Azure.Compute.Batch.NetworkSecurityGroupRuleAccess left, Azure.Compute.Batch.NetworkSecurityGroupRuleAccess right) { throw null; }
+        public override string ToString() { throw null; }
+    }
+    public partial class NfsMountConfiguration : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.NfsMountConfiguration>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.NfsMountConfiguration>
+    {
+        public NfsMountConfiguration(string source, string relativeMountPath) { }
+        public string MountOptions { get { throw null; } set { } }
+        public string RelativeMountPath { get { throw null; } set { } }
+        public string Source { get { throw null; } set { } }
+        Azure.Compute.Batch.NfsMountConfiguration System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.NfsMountConfiguration>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.NfsMountConfiguration>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.NfsMountConfiguration System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.NfsMountConfiguration>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.NfsMountConfiguration>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.NfsMountConfiguration>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)]
+    public readonly partial struct OnAllBatchTasksComplete : System.IEquatable<Azure.Compute.Batch.OnAllBatchTasksComplete>
+    {
+        private readonly object _dummy;
+        private readonly int _dummyPrimitive;
+        public OnAllBatchTasksComplete(string value) { throw null; }
+        public static Azure.Compute.Batch.OnAllBatchTasksComplete NoAction { get { throw null; } }
+        public static Azure.Compute.Batch.OnAllBatchTasksComplete TerminateJob { get { throw null; } }
+        public bool Equals(Azure.Compute.Batch.OnAllBatchTasksComplete other) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override bool Equals(object obj) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override int GetHashCode() { throw null; }
+        public static bool operator ==(Azure.Compute.Batch.OnAllBatchTasksComplete left, Azure.Compute.Batch.OnAllBatchTasksComplete right) { throw null; }
+        public static implicit operator Azure.Compute.Batch.OnAllBatchTasksComplete (string value) { throw null; }
+        public static bool operator !=(Azure.Compute.Batch.OnAllBatchTasksComplete left, Azure.Compute.Batch.OnAllBatchTasksComplete right) { throw null; }
+        public override string ToString() { throw null; }
+    }
+    [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)]
+    public readonly partial struct OnBatchTaskFailure : System.IEquatable<Azure.Compute.Batch.OnBatchTaskFailure>
+    {
+        private readonly object _dummy;
+        private readonly int _dummyPrimitive;
+        public OnBatchTaskFailure(string value) { throw null; }
+        public static Azure.Compute.Batch.OnBatchTaskFailure NoAction { get { throw null; } }
+        public static Azure.Compute.Batch.OnBatchTaskFailure PerformExitOptionsJobAction { get { throw null; } }
+        public bool Equals(Azure.Compute.Batch.OnBatchTaskFailure other) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override bool Equals(object obj) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override int GetHashCode() { throw null; }
+        public static bool operator ==(Azure.Compute.Batch.OnBatchTaskFailure left, Azure.Compute.Batch.OnBatchTaskFailure right) { throw null; }
+        public static implicit operator Azure.Compute.Batch.OnBatchTaskFailure (string value) { throw null; }
+        public static bool operator !=(Azure.Compute.Batch.OnBatchTaskFailure left, Azure.Compute.Batch.OnBatchTaskFailure right) { throw null; }
+        public override string ToString() { throw null; }
+    }
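// Editor's note: OnAllBatchTasksComplete and OnBatchTaskFailure above are job-level settings; for
// example, TerminateJob ends a job automatically once every task finishes. The job model and its
// property name in this one-line sketch are assumptions, not taken from this listing:
//   job.OnAllTasksComplete = Azure.Compute.Batch.OnAllBatchTasksComplete.TerminateJob;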
+    public partial class OSDisk : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.OSDisk>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.OSDisk>
+    {
+        public OSDisk() { }
+        public Azure.Compute.Batch.CachingType? Caching { get { throw null; } set { } }
+        public int? DiskSizeGB { get { throw null; } set { } }
+        public Azure.Compute.Batch.DiffDiskSettings EphemeralOSDiskSettings { get { throw null; } set { } }
+        public Azure.Compute.Batch.ManagedDisk ManagedDisk { get { throw null; } set { } }
+        public bool? WriteAcceleratorEnabled { get { throw null; } set { } }
+        Azure.Compute.Batch.OSDisk System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.OSDisk>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.OSDisk>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.OSDisk System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.OSDisk>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.OSDisk>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.OSDisk>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)]
+    public readonly partial struct OSType : System.IEquatable<Azure.Compute.Batch.OSType>
+    {
+        private readonly object _dummy;
+        private readonly int _dummyPrimitive;
+        public OSType(string value) { throw null; }
+        public static Azure.Compute.Batch.OSType Linux { get { throw null; } }
+        public static Azure.Compute.Batch.OSType Windows { get { throw null; } }
+        public bool Equals(Azure.Compute.Batch.OSType other) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override bool Equals(object obj) { throw null; }
+        [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)]
+        public override int GetHashCode() { throw null; }
+        public static bool operator ==(Azure.Compute.Batch.OSType left, Azure.Compute.Batch.OSType right) { throw null; }
+        public static implicit operator Azure.Compute.Batch.OSType (string value) { throw null; }
+        public static bool operator !=(Azure.Compute.Batch.OSType left, Azure.Compute.Batch.OSType right) { throw null; }
+        public override string ToString() { throw null; }
+    }
+    public partial class OutputFile : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.OutputFile>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.OutputFile>
+    {
+        public OutputFile(string filePattern, Azure.Compute.Batch.OutputFileDestination destination, Azure.Compute.Batch.OutputFileUploadConfig uploadOptions) { }
+        public Azure.Compute.Batch.OutputFileDestination Destination { get { throw null; } set { } }
+        public string FilePattern { get { throw null; } set { } }
+        public Azure.Compute.Batch.OutputFileUploadConfig UploadOptions { get { throw null; } set { } }
+        Azure.Compute.Batch.OutputFile System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.OutputFile>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.OutputFile>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.OutputFile System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.OutputFile>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.OutputFile>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.OutputFile>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    public partial class OutputFileBlobContainerDestination : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.OutputFileBlobContainerDestination>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.OutputFileBlobContainerDestination>
+    {
+        public OutputFileBlobContainerDestination(string containerUrl) { }
+        public string ContainerUrl { get { throw null; } set { } }
+        public Azure.Compute.Batch.BatchNodeIdentityReference IdentityReference { get { throw null; } set { } }
+        public string Path { get { throw null; } set { } }
+        public System.Collections.Generic.IList<Azure.Compute.Batch.HttpHeader> UploadHeaders { get { throw null; } }
+        Azure.Compute.Batch.OutputFileBlobContainerDestination System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.OutputFileBlobContainerDestination>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.OutputFileBlobContainerDestination>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.OutputFileBlobContainerDestination System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.OutputFileBlobContainerDestination>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.OutputFileBlobContainerDestination>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.OutputFileBlobContainerDestination>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
+    public partial class OutputFileDestination : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.OutputFileDestination>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.OutputFileDestination>
+    {
+        public OutputFileDestination() { }
+        public Azure.Compute.Batch.OutputFileBlobContainerDestination Container { get { throw null; } set { } }
+        Azure.Compute.Batch.OutputFileDestination System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.OutputFileDestination>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.OutputFileDestination>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+        Azure.Compute.Batch.OutputFileDestination System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.OutputFileDestination>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.OutputFileDestination>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+        System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.OutputFileDestination>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+    }
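// Editor's note: usage sketch only. OutputFile uploads task files to blob storage when the upload
// condition is met; the upload-condition types it references are declared just below, and the
// container SAS URL here is a placeholder.
//   var stdoutUpload = new Azure.Compute.Batch.OutputFile(
//       filePattern: "../stdout.txt",
//       destination: new Azure.Compute.Batch.OutputFileDestination
//       {
//           Container = new Azure.Compute.Batch.OutputFileBlobContainerDestination("https://<account>.blob.core.windows.net/logs?<sas>"),
//       },
//       uploadOptions: new Azure.Compute.Batch.OutputFileUploadConfig(Azure.Compute.Batch.OutputFileUploadCondition.TaskCompletion));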
} + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.OutputFileDestination System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] + public readonly partial struct OutputFileUploadCondition : System.IEquatable + { + private readonly object _dummy; + private readonly int _dummyPrimitive; + public OutputFileUploadCondition(string value) { throw null; } + public static Azure.Compute.Batch.OutputFileUploadCondition TaskCompletion { get { throw null; } } + public static Azure.Compute.Batch.OutputFileUploadCondition TaskFailure { get { throw null; } } + public static Azure.Compute.Batch.OutputFileUploadCondition TaskSuccess { get { throw null; } } + public bool Equals(Azure.Compute.Batch.OutputFileUploadCondition other) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override bool Equals(object obj) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override int GetHashCode() { throw null; } + public static bool operator ==(Azure.Compute.Batch.OutputFileUploadCondition left, Azure.Compute.Batch.OutputFileUploadCondition right) { throw null; } + public static implicit operator Azure.Compute.Batch.OutputFileUploadCondition (string value) { throw null; } + public static bool operator !=(Azure.Compute.Batch.OutputFileUploadCondition left, Azure.Compute.Batch.OutputFileUploadCondition right) { throw null; } + public override string ToString() { throw null; } + } + public partial class OutputFileUploadConfig : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public OutputFileUploadConfig(Azure.Compute.Batch.OutputFileUploadCondition uploadCondition) { } + public Azure.Compute.Batch.OutputFileUploadCondition UploadCondition { get { throw null; } set { } } + Azure.Compute.Batch.OutputFileUploadConfig System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.OutputFileUploadConfig System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class PublicIpAddressConfiguration : System.ClientModel.Primitives.IJsonModel, 
System.ClientModel.Primitives.IPersistableModel + { + public PublicIpAddressConfiguration() { } + public System.Collections.Generic.IList IpAddressIds { get { throw null; } } + public Azure.Compute.Batch.IpAddressProvisioningType? IpAddressProvisioningType { get { throw null; } set { } } + Azure.Compute.Batch.PublicIpAddressConfiguration System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.PublicIpAddressConfiguration System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class RecentBatchJob : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public RecentBatchJob() { } + public string Id { get { throw null; } set { } } + public string Url { get { throw null; } set { } } + Azure.Compute.Batch.RecentBatchJob System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.RecentBatchJob System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class ResizeError : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + internal ResizeError() { } + public string Code { get { throw null; } } + public string Message { get { throw null; } } + public System.Collections.Generic.IReadOnlyList Values { get { throw null; } } + Azure.Compute.Batch.ResizeError System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.ResizeError System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; 
} + } + public partial class ResourceFile : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public ResourceFile() { } + public string AutoStorageContainerName { get { throw null; } set { } } + public string BlobPrefix { get { throw null; } set { } } + public string FileMode { get { throw null; } set { } } + public string FilePath { get { throw null; } set { } } + public string HttpUrl { get { throw null; } set { } } + public Azure.Compute.Batch.BatchNodeIdentityReference IdentityReference { get { throw null; } set { } } + public string StorageContainerUrl { get { throw null; } set { } } + Azure.Compute.Batch.ResourceFile System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.ResourceFile System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class RollingUpgradePolicy : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public RollingUpgradePolicy() { } + public bool? EnableCrossZoneUpgrade { get { throw null; } set { } } + public int? MaxBatchInstancePercent { get { throw null; } set { } } + public int? MaxUnhealthyInstancePercent { get { throw null; } set { } } + public int? MaxUnhealthyUpgradedInstancePercent { get { throw null; } set { } } + public System.TimeSpan? PauseTimeBetweenBatches { get { throw null; } set { } } + public bool? PrioritizeUnhealthyInstances { get { throw null; } set { } } + public bool? 
RollbackFailedInstancesOnPolicyBreach { get { throw null; } set { } } + Azure.Compute.Batch.RollingUpgradePolicy System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.RollingUpgradePolicy System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] + public readonly partial struct SchedulingState : System.IEquatable + { + private readonly object _dummy; + private readonly int _dummyPrimitive; + public SchedulingState(string value) { throw null; } + public static Azure.Compute.Batch.SchedulingState Disabled { get { throw null; } } + public static Azure.Compute.Batch.SchedulingState Enabled { get { throw null; } } + public bool Equals(Azure.Compute.Batch.SchedulingState other) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override bool Equals(object obj) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override int GetHashCode() { throw null; } + public static bool operator ==(Azure.Compute.Batch.SchedulingState left, Azure.Compute.Batch.SchedulingState right) { throw null; } + public static implicit operator Azure.Compute.Batch.SchedulingState (string value) { throw null; } + public static bool operator !=(Azure.Compute.Batch.SchedulingState left, Azure.Compute.Batch.SchedulingState right) { throw null; } + public override string ToString() { throw null; } + } + public partial class SecurityProfile : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public SecurityProfile(bool encryptionAtHost, Azure.Compute.Batch.SecurityTypes securityType, Azure.Compute.Batch.UefiSettings uefiSettings) { } + public bool EncryptionAtHost { get { throw null; } set { } } + public Azure.Compute.Batch.SecurityTypes SecurityType { get { throw null; } set { } } + public Azure.Compute.Batch.UefiSettings UefiSettings { get { throw null; } set { } } + Azure.Compute.Batch.SecurityProfile System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.SecurityProfile System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData 
System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] + public readonly partial struct SecurityTypes : System.IEquatable + { + private readonly object _dummy; + private readonly int _dummyPrimitive; + public SecurityTypes(string value) { throw null; } + public static Azure.Compute.Batch.SecurityTypes TrustedLaunch { get { throw null; } } + public bool Equals(Azure.Compute.Batch.SecurityTypes other) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override bool Equals(object obj) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override int GetHashCode() { throw null; } + public static bool operator ==(Azure.Compute.Batch.SecurityTypes left, Azure.Compute.Batch.SecurityTypes right) { throw null; } + public static implicit operator Azure.Compute.Batch.SecurityTypes (string value) { throw null; } + public static bool operator !=(Azure.Compute.Batch.SecurityTypes left, Azure.Compute.Batch.SecurityTypes right) { throw null; } + public override string ToString() { throw null; } + } + public partial class ServiceArtifactReference : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public ServiceArtifactReference(string id) { } + public string Id { get { throw null; } set { } } + Azure.Compute.Batch.ServiceArtifactReference System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.ServiceArtifactReference System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] + public readonly partial struct StatusLevelTypes : System.IEquatable + { + private readonly object _dummy; + private readonly int _dummyPrimitive; + public StatusLevelTypes(string value) { throw null; } + public static Azure.Compute.Batch.StatusLevelTypes Error { get { throw null; } } + public static Azure.Compute.Batch.StatusLevelTypes Info { get { throw null; } } + public static Azure.Compute.Batch.StatusLevelTypes Warning { get { throw null; } } + public bool Equals(Azure.Compute.Batch.StatusLevelTypes other) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override bool Equals(object obj) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override int GetHashCode() { throw null; } + public static bool operator ==(Azure.Compute.Batch.StatusLevelTypes left, 
Azure.Compute.Batch.StatusLevelTypes right) { throw null; } + public static implicit operator Azure.Compute.Batch.StatusLevelTypes (string value) { throw null; } + public static bool operator !=(Azure.Compute.Batch.StatusLevelTypes left, Azure.Compute.Batch.StatusLevelTypes right) { throw null; } + public override string ToString() { throw null; } + } + [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] + public readonly partial struct StorageAccountType : System.IEquatable + { + private readonly object _dummy; + private readonly int _dummyPrimitive; + public StorageAccountType(string value) { throw null; } + public static Azure.Compute.Batch.StorageAccountType PremiumLRS { get { throw null; } } + public static Azure.Compute.Batch.StorageAccountType StandardLRS { get { throw null; } } + public static Azure.Compute.Batch.StorageAccountType StandardSSDLRS { get { throw null; } } + public bool Equals(Azure.Compute.Batch.StorageAccountType other) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override bool Equals(object obj) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override int GetHashCode() { throw null; } + public static bool operator ==(Azure.Compute.Batch.StorageAccountType left, Azure.Compute.Batch.StorageAccountType right) { throw null; } + public static implicit operator Azure.Compute.Batch.StorageAccountType (string value) { throw null; } + public static bool operator !=(Azure.Compute.Batch.StorageAccountType left, Azure.Compute.Batch.StorageAccountType right) { throw null; } + public override string ToString() { throw null; } + } + public partial class UefiSettings : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public UefiSettings() { } + public bool? SecureBootEnabled { get { throw null; } set { } } + public bool? 
VTpmEnabled { get { throw null; } set { } } + Azure.Compute.Batch.UefiSettings System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.UefiSettings System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] + public readonly partial struct UpgradeMode : System.IEquatable + { + private readonly object _dummy; + private readonly int _dummyPrimitive; + public UpgradeMode(string value) { throw null; } + public static Azure.Compute.Batch.UpgradeMode Automatic { get { throw null; } } + public static Azure.Compute.Batch.UpgradeMode Manual { get { throw null; } } + public static Azure.Compute.Batch.UpgradeMode Rolling { get { throw null; } } + public bool Equals(Azure.Compute.Batch.UpgradeMode other) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override bool Equals(object obj) { throw null; } + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + public override int GetHashCode() { throw null; } + public static bool operator ==(Azure.Compute.Batch.UpgradeMode left, Azure.Compute.Batch.UpgradeMode right) { throw null; } + public static implicit operator Azure.Compute.Batch.UpgradeMode (string value) { throw null; } + public static bool operator !=(Azure.Compute.Batch.UpgradeMode left, Azure.Compute.Batch.UpgradeMode right) { throw null; } + public override string ToString() { throw null; } + } + public partial class UpgradePolicy : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public UpgradePolicy(Azure.Compute.Batch.UpgradeMode mode) { } + public Azure.Compute.Batch.AutomaticOsUpgradePolicy AutomaticOsUpgradePolicy { get { throw null; } set { } } + public Azure.Compute.Batch.UpgradeMode Mode { get { throw null; } set { } } + public Azure.Compute.Batch.RollingUpgradePolicy RollingUpgradePolicy { get { throw null; } set { } } + Azure.Compute.Batch.UpgradePolicy System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.UpgradePolicy System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData 
System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class UploadBatchServiceLogsContent : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public UploadBatchServiceLogsContent(string containerUrl, System.DateTimeOffset startTime) { } + public string ContainerUrl { get { throw null; } } + public System.DateTimeOffset? EndTime { get { throw null; } set { } } + public Azure.Compute.Batch.BatchNodeIdentityReference IdentityReference { get { throw null; } set { } } + public System.DateTimeOffset StartTime { get { throw null; } } + Azure.Compute.Batch.UploadBatchServiceLogsContent System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.UploadBatchServiceLogsContent System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class UploadBatchServiceLogsResult : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + internal UploadBatchServiceLogsResult() { } + public int NumberOfFilesUploaded { get { throw null; } } + public string VirtualDirectoryName { get { throw null; } } + Azure.Compute.Batch.UploadBatchServiceLogsResult System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.UploadBatchServiceLogsResult System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class UserAccount : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public UserAccount(string name, string password) { } + public Azure.Compute.Batch.ElevationLevel? 
ElevationLevel { get { throw null; } set { } } + public Azure.Compute.Batch.LinuxUserConfiguration LinuxUserConfiguration { get { throw null; } set { } } + public string Name { get { throw null; } set { } } + public string Password { get { throw null; } set { } } + public Azure.Compute.Batch.WindowsUserConfiguration WindowsUserConfiguration { get { throw null; } set { } } + Azure.Compute.Batch.UserAccount System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.UserAccount System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class UserAssignedIdentity : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + internal UserAssignedIdentity() { } + public string ClientId { get { throw null; } } + public string PrincipalId { get { throw null; } } + public string ResourceId { get { throw null; } } + Azure.Compute.Batch.UserAssignedIdentity System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.UserAssignedIdentity System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class UserIdentity : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public UserIdentity() { } + public Azure.Compute.Batch.AutoUserSpecification AutoUser { get { throw null; } set { } } + public string Username { get { throw null; } set { } } + Azure.Compute.Batch.UserIdentity System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.UserIdentity System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData 
System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class VirtualMachineConfiguration : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public VirtualMachineConfiguration(Azure.Compute.Batch.ImageReference imageReference, string nodeAgentSkuId) { } + public Azure.Compute.Batch.ContainerConfiguration ContainerConfiguration { get { throw null; } set { } } + public System.Collections.Generic.IList DataDisks { get { throw null; } } + public Azure.Compute.Batch.DiskEncryptionConfiguration DiskEncryptionConfiguration { get { throw null; } set { } } + public System.Collections.Generic.IList Extensions { get { throw null; } } + public Azure.Compute.Batch.ImageReference ImageReference { get { throw null; } set { } } + public string LicenseType { get { throw null; } set { } } + public string NodeAgentSkuId { get { throw null; } set { } } + public Azure.Compute.Batch.BatchNodePlacementConfiguration NodePlacementConfiguration { get { throw null; } set { } } + public Azure.Compute.Batch.OSDisk OsDisk { get { throw null; } set { } } + public Azure.Compute.Batch.SecurityProfile SecurityProfile { get { throw null; } set { } } + public Azure.Compute.Batch.ServiceArtifactReference ServiceArtifactReference { get { throw null; } set { } } + public Azure.Compute.Batch.WindowsConfiguration WindowsConfiguration { get { throw null; } set { } } + Azure.Compute.Batch.VirtualMachineConfiguration System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.VirtualMachineConfiguration System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class VirtualMachineInfo : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + internal VirtualMachineInfo() { } + public Azure.Compute.Batch.ImageReference ImageReference { get { throw null; } } + public string ScaleSetVmResourceId { get { throw null; } } + Azure.Compute.Batch.VirtualMachineInfo System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.VirtualMachineInfo System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData 
System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class VMExtension : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public VMExtension(string name, string publisher, string type) { } + public bool? AutoUpgradeMinorVersion { get { throw null; } set { } } + public bool? EnableAutomaticUpgrade { get { throw null; } set { } } + public string Name { get { throw null; } set { } } + public System.Collections.Generic.IDictionary ProtectedSettings { get { throw null; } } + public System.Collections.Generic.IList ProvisionAfterExtensions { get { throw null; } } + public string Publisher { get { throw null; } set { } } + public System.Collections.Generic.IDictionary Settings { get { throw null; } } + public string Type { get { throw null; } set { } } + public string TypeHandlerVersion { get { throw null; } set { } } + Azure.Compute.Batch.VMExtension System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.VMExtension System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class VMExtensionInstanceView : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + internal VMExtensionInstanceView() { } + public string Name { get { throw null; } } + public System.Collections.Generic.IReadOnlyList Statuses { get { throw null; } } + public System.Collections.Generic.IReadOnlyList SubStatuses { get { throw null; } } + Azure.Compute.Batch.VMExtensionInstanceView System.ClientModel.Primitives.IJsonModel.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + void System.ClientModel.Primitives.IJsonModel.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { } + Azure.Compute.Batch.VMExtensionInstanceView System.ClientModel.Primitives.IPersistableModel.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + string System.ClientModel.Primitives.IPersistableModel.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + System.BinaryData System.ClientModel.Primitives.IPersistableModel.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; } + } + public partial class WindowsConfiguration : System.ClientModel.Primitives.IJsonModel, System.ClientModel.Primitives.IPersistableModel + { + public WindowsConfiguration() { } + public bool? 
EnableAutomaticUpdates { get { throw null; } set { } }
+ Azure.Compute.Batch.WindowsConfiguration System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.WindowsConfiguration>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+ void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.WindowsConfiguration>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+ Azure.Compute.Batch.WindowsConfiguration System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.WindowsConfiguration>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+ string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.WindowsConfiguration>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+ System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.WindowsConfiguration>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+ }
+ public partial class WindowsUserConfiguration : System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.WindowsUserConfiguration>, System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.WindowsUserConfiguration>
+ {
+ public WindowsUserConfiguration() { }
+ public Azure.Compute.Batch.LoginMode? LoginMode { get { throw null; } set { } }
+ Azure.Compute.Batch.WindowsUserConfiguration System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.WindowsUserConfiguration>.Create(ref System.Text.Json.Utf8JsonReader reader, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+ void System.ClientModel.Primitives.IJsonModel<Azure.Compute.Batch.WindowsUserConfiguration>.Write(System.Text.Json.Utf8JsonWriter writer, System.ClientModel.Primitives.ModelReaderWriterOptions options) { }
+ Azure.Compute.Batch.WindowsUserConfiguration System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.WindowsUserConfiguration>.Create(System.BinaryData data, System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+ string System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.WindowsUserConfiguration>.GetFormatFromOptions(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+ System.BinaryData System.ClientModel.Primitives.IPersistableModel<Azure.Compute.Batch.WindowsUserConfiguration>.Write(System.ClientModel.Primitives.ModelReaderWriterOptions options) { throw null; }
+ }
+}
+namespace Microsoft.Extensions.Azure
+{
+ public static partial class ComputeBatchClientBuilderExtensions
+ {
+ public static Azure.Core.Extensions.IAzureClientBuilder<Azure.Compute.Batch.BatchClient, Azure.Compute.Batch.BatchClientOptions> AddBatchClient<TBuilder>(this TBuilder builder, System.Uri endpoint) where TBuilder : Azure.Core.Extensions.IAzureClientFactoryBuilderWithCredential { throw null; }
+ public static Azure.Core.Extensions.IAzureClientBuilder<Azure.Compute.Batch.BatchClient, Azure.Compute.Batch.BatchClientOptions> AddBatchClient<TBuilder, TConfiguration>(this TBuilder builder, TConfiguration configuration) where TBuilder : Azure.Core.Extensions.IAzureClientFactoryBuilderWithConfiguration<TConfiguration> { throw null; }
+ }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/assets.json b/sdk/batch/Azure.Compute.Batch/assets.json
new file mode 100644
index 0000000000000..726e7ec7f9f89
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/assets.json
@@ -0,0 +1,6 @@
+{
+ "AssetsRepo": "Azure/azure-sdk-assets",
+ "AssetsRepoPrefixPath": "net",
+ "TagPrefix": "net/batch/Azure.Compute.Batch",
+ "Tag": "net/batch/Azure.Compute.Batch_6533cf2dd7"
+}
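Usage note (a minimal, hypothetical sketch, not part of this diff): the `AddBatchClient` extensions above plug into `Microsoft.Extensions.Azure` client registration. The sketch below assumes an ASP.NET Core host; the endpoint URI is a placeholder, and `DefaultAzureCredential` is only one possible credential choice.

    using Azure.Identity;
    using Microsoft.Extensions.Azure;

    var builder = WebApplication.CreateBuilder(args);
    builder.Services.AddAzureClients(clients =>
    {
        // Register the BatchClient with the client factory via the generated extension.
        clients.AddBatchClient(new Uri("https://<account>.<region>.batch.azure.com"));
        // The factory supplies this TokenCredential to the clients registered above.
        clients.UseCredential(new DefaultAzureCredential());
    });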
diff --git a/sdk/batch/Azure.Compute.Batch/src/Azure.Compute.Batch.csproj b/sdk/batch/Azure.Compute.Batch/src/Azure.Compute.Batch.csproj
new file mode 100644
index 0000000000000..44d8016cad956
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/src/Azure.Compute.Batch.csproj
@@ -0,0 +1,20 @@
+
+
+ This is the Azure.Compute.Batch client library for developing .NET applications with a rich experience.
+ Azure SDK Code Generation Azure.Compute.Batch for Azure Data Plane
+ 1.0.0-beta.1
+ Azure.Compute.Batch
+ $(RequiredTargetFrameworks)
+ true
+
+
+
+
+
+
+
+
+
+
diff --git a/sdk/batch/Azure.Compute.Batch/src/Custom/BatchClientCustom.cs b/sdk/batch/Azure.Compute.Batch/src/Custom/BatchClientCustom.cs
new file mode 100644
index 0000000000000..794b39965948e
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/src/Custom/BatchClientCustom.cs
@@ -0,0 +1,608 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.Text;
+using Azure.Core.Pipeline;
+using Azure.Core;
+using Azure.Compute.Batch.Custom;
+using System.Threading.Tasks;
+using static Azure.Core.HttpPipelineExtensions;
+using System.Threading;
+using System.Security.Cryptography;
+
+namespace Azure.Compute.Batch
+{
+ public partial class BatchClient
+ {
+ private readonly AzureNamedKeyCredential _namedKeyCredential;
+
+ /// Initializes a new instance of BatchClient.
+ /// Batch account endpoint (for example: https://batchaccount.eastus2.batch.azure.com).
+ /// A credential used to authenticate to an Azure Service.
+ /// is null.
+ public BatchClient(Uri endpoint, AzureNamedKeyCredential credential) : this(endpoint, credential, new BatchClientOptions())
+ {
+ }
+
+ /// Initializes a new instance of BatchClient.
+ /// Batch account endpoint (for example: https://batchaccount.eastus2.batch.azure.com).
+ /// A credential used to authenticate to an Azure Service.
+ /// The options for configuring the client.
+ /// is null.
+ public BatchClient(Uri endpoint, AzureNamedKeyCredential credential, BatchClientOptions options)
+ {
+ Argument.AssertNotNull(credential, nameof(credential));
+ options ??= new BatchClientOptions();
+
+ ClientDiagnostics = new ClientDiagnostics(options, true);
+ _namedKeyCredential = credential;
+ _pipeline = HttpPipelineBuilder.Build(options, Array.Empty<HttpPipelinePolicy>(), new HttpPipelinePolicy[] { new BatchNamedKeyCredentialPolicy(credential) }, new ResponseClassifier());
+ _endpoint = endpoint;
+ _apiVersion = options.Version;
+ }
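+ // Usage note (hypothetical sketch, not generated code): constructing the client
+ // with shared-key authentication via the constructors above. The account name,
+ // key, and endpoint values are placeholders.
+ //
+ //   var credential = new AzureNamedKeyCredential("<account-name>", "<account-key>");
+ //   var client = new BatchClient(new Uri("https://<account>.<region>.batch.azure.com"), credential);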
+ // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method
+ /// [Protocol Method] Gets basic properties of a Pool.
+ /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+ /// The ID of the Pool to get.
+ /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.
+ /// The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly.
+ /// The content to send as the request conditions of the request.
+ /// The request context, which can override default behaviors of the client pipeline on a per-call basis.
+ /// is null.
+ /// is an empty string, and was expected to be non-empty.
+ /// Service returned a non-success status code.
+ /// The response returned from the service.
+ public virtual async Task<Response<bool>> PoolExistsAsync(string poolId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null)
+ {
+ Argument.AssertNotNullOrEmpty(poolId, nameof(poolId));
+
+ using var scope = ClientDiagnostics.CreateScope("BatchClient.PoolExists");
+ scope.Start();
+ try
+ {
+ using HttpMessage message = CreatePoolExistsRequest(poolId, timeOutInSeconds, ocpdate, requestConditions, context);
+ return await _pipeline.ProcessHeadAsBoolMessageAsync(message, ClientDiagnostics, context).ConfigureAwait(false);
+ }
+ catch (Exception e)
+ {
+ scope.Failed(e);
+ throw;
+ }
+ }
+
+ // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method
+ /// [Protocol Method] Gets basic properties of a Pool.
+ /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+ /// The ID of the Pool to get.
+ /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.
+ /// The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly.
+ /// The content to send as the request conditions of the request.
+ /// The request context, which can override default behaviors of the client pipeline on a per-call basis.
+ /// is null.
+ /// is an empty string, and was expected to be non-empty.
+ /// Service returned a non-success status code.
+ /// The response returned from the service.
+ public virtual Response<bool> PoolExists(string poolId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null)
+ {
+ Argument.AssertNotNullOrEmpty(poolId, nameof(poolId));
+
+ using var scope = ClientDiagnostics.CreateScope("BatchClient.PoolExists");
+ scope.Start();
+ try
+ {
+ using HttpMessage message = CreatePoolExistsRequest(poolId, timeOutInSeconds, ocpdate, requestConditions, context);
+ return _pipeline.ProcessHeadAsBoolMessage(message, ClientDiagnostics, context);
+ }
+ catch (Exception e)
+ {
+ scope.Failed(e);
+ throw;
+ }
+ }
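+ // Usage note (hypothetical sketch): the existence checks above issue a HEAD
+ // request and surface the result as Response<bool>. The pool ID is a placeholder.
+ //
+ //   Response<bool> exists = client.PoolExists("pool-1");
+ //   if (exists.Value) { /* the pool exists */ }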
+ // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method
+ /// [Protocol Method] Checks whether the specified Job Schedule exists.
+ /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+ /// The ID of the Job Schedule which you want to check.
+ /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.
+ /// The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly.
+ /// The content to send as the request conditions of the request.
+ /// The request context, which can override default behaviors of the client pipeline on a per-call basis.
+ /// is null.
+ /// is an empty string, and was expected to be non-empty.
+ /// Service returned a non-success status code.
+ /// The response returned from the service.
+ public virtual async Task<Response<bool>> JobScheduleExistsAsync(string jobScheduleId, int? timeOut = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null)
+ {
+ Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId));
+
+ using var scope = ClientDiagnostics.CreateScope("BatchClient.JobScheduleExists");
+ scope.Start();
+ try
+ {
+ using HttpMessage message = CreateJobScheduleExistsRequest(jobScheduleId, timeOut, ocpDate, requestConditions, context);
+ return await _pipeline.ProcessHeadAsBoolMessageAsync(message, ClientDiagnostics, context).ConfigureAwait(false);
+ }
+ catch (Exception e)
+ {
+ scope.Failed(e);
+ throw;
+ }
+ }
+
+ // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method
+ /// [Protocol Method] Checks whether the specified Job Schedule exists.
+ /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+ /// The ID of the Job Schedule which you want to check.
+ /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.
+ /// The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly.
+ /// The content to send as the request conditions of the request.
+ /// The request context, which can override default behaviors of the client pipeline on a per-call basis.
+ /// is null.
+ /// is an empty string, and was expected to be non-empty.
+ /// Service returned a non-success status code.
+ /// The response returned from the service.
+ public virtual Response<bool> JobScheduleExists(string jobScheduleId, int? timeOut = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null)
+ {
+ Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId));
+
+ using var scope = ClientDiagnostics.CreateScope("BatchClient.JobScheduleExists");
+ scope.Start();
+ try
+ {
+ using HttpMessage message = CreateJobScheduleExistsRequest(jobScheduleId, timeOut, ocpDate, requestConditions, context);
+ return _pipeline.ProcessHeadAsBoolMessage(message, ClientDiagnostics, context);
+ }
+ catch (Exception e)
+ {
+ scope.Failed(e);
+ throw;
+ }
+ }
+ // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method
+ /// [Protocol Method] Gets the properties of the specified Task file.
+ /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+ /// The ID of the Job that contains the Task.
+ /// The ID of the Task whose file you want to retrieve.
+ /// The path to the Task file that you want to get the content of.
+ /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.
+ /// The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly.
+ /// The cancellation token to use.
+ /// , or is null.
+ /// , or is an empty string, and was expected to be non-empty.
+ /// Service returned a non-success status code.
+ /// The response returned from the service.
+ public virtual async Task<Response<BatchFileProperties>> GetTaskFilePropertiesAsync(string jobId, string taskId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default)
+ {
+ using var scope = ClientDiagnostics.CreateScope("BatchClient.GetTaskFileProperties");
+ scope.Start();
+ try
+ {
+ // Build the request context from the caller's cancellation token.
+ RequestContext context = FromCancellationToken(cancellationToken);
+ Response response = await GetTaskFilePropertiesInternalAsync(jobId, taskId, filePath, timeOutInSeconds, ocpdate, null, context).ConfigureAwait(false);
+ return Response.FromValue(BatchFileProperties.FromResponse(response), response);
+ }
+ catch (Exception e)
+ {
+ scope.Failed(e);
+ throw;
+ }
+ }
+
+ // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method
+ /// [Protocol Method] Gets the properties of the specified Task file.
+ /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+ /// The ID of the Job that contains the Task.
+ /// The ID of the Task whose file you want to retrieve.
+ /// The path to the Task file that you want to get the content of.
+ /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.
+ /// The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly.
+ /// The cancellation token to use.
+ /// , or is null.
+ /// , or is an empty string, and was expected to be non-empty.
+ /// Service returned a non-success status code.
+ /// The response returned from the service.
+ public virtual Response<BatchFileProperties> GetTaskFileProperties(string jobId, string taskId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default)
+ {
+ using var scope = ClientDiagnostics.CreateScope("BatchClient.GetTaskFileProperties");
+ scope.Start();
+ try
+ {
+ RequestContext context = FromCancellationToken(cancellationToken);
+ Response response = GetTaskFilePropertiesInternal(jobId, taskId, filePath, timeOutInSeconds, ocpdate, null, context);
+ return Response.FromValue(BatchFileProperties.FromResponse(response), response);
+ }
+ catch (Exception e)
+ {
+ scope.Failed(e);
+ throw;
+ }
+ }
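+ // Usage note (hypothetical sketch): fetching output-file metadata with the
+ // convenience method above. Job/task IDs and the file path are placeholders.
+ //
+ //   BatchFileProperties props = client.GetTaskFileProperties("job-1", "task-1", "wd/output.txt").Value;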
+ // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method
+ /// [Protocol Method] Gets the properties of the specified Compute Node file.
+ /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+ /// The ID of the Pool that contains the Compute Node.
+ /// The ID of the Compute Node.
+ /// The path to the file or directory.
+ /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.
+ /// The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly.
+ /// The cancellation token to use.
+ /// , or is null.
+ /// , or is an empty string, and was expected to be non-empty.
+ /// Service returned a non-success status code.
+ /// The response returned from the service.
+ public virtual async Task<Response<BatchFileProperties>> GetNodeFilePropertiesAsync(string poolId, string nodeId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default)
+ {
+ using var scope = ClientDiagnostics.CreateScope("BatchClient.GetNodeFileProperties");
+ scope.Start();
+ try
+ {
+ RequestContext context = FromCancellationToken(cancellationToken);
+ Response response = await GetNodeFilePropertiesInternalAsync(poolId, nodeId, filePath, timeOutInSeconds, ocpdate, null, context).ConfigureAwait(false);
+ return Response.FromValue(BatchFileProperties.FromResponse(response), response);
+ }
+ catch (Exception e)
+ {
+ scope.Failed(e);
+ throw;
+ }
+ }
+
+ // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method
+ /// [Protocol Method] Gets the properties of the specified Compute Node file.
+ /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+ /// The ID of the Pool that contains the Compute Node.
+ /// The ID of the Compute Node.
+ /// The path to the file or directory.
+ /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.
+ /// The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly.
+ /// The cancellation token to use.
+ /// , or is null.
+ /// , or is an empty string, and was expected to be non-empty.
+ /// Service returned a non-success status code.
+ /// The response returned from the service.
+ public virtual Response<BatchFileProperties> GetNodeFileProperties(string poolId, string nodeId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default)
+ {
+ using var scope = ClientDiagnostics.CreateScope("BatchClient.GetNodeFileProperties");
+ scope.Start();
+ try
+ {
+ // Build the request context from the caller's cancellation token.
+ RequestContext context = FromCancellationToken(cancellationToken);
+ Response response = GetNodeFilePropertiesInternal(poolId, nodeId, filePath, timeOutInSeconds, ocpdate, null, context);
+ return Response.FromValue(BatchFileProperties.FromResponse(response), response);
+ }
+ catch (Exception e)
+ {
+ scope.Failed(e);
+ throw;
+ }
+ }
+ /// [Protocol Method] Updates the properties of the specified Pool.
+ /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+ /// The ID of the Pool to update.
+ /// The pool properties to update.
+ /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.
+ /// The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly.
+ /// The content to send as the request conditions of the request.
+ /// The cancellation token to use.
+ /// or is null.
+ /// is an empty string, and was expected to be non-empty.
+ /// Service returned a non-success status code.
+ /// The response returned from the service.
+ public virtual async Task<Response> UpdatePoolAsync(string poolId, BatchPoolUpdateContent pool, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default)
+ {
+ Argument.AssertNotNullOrEmpty(poolId, nameof(poolId));
+ Argument.AssertNotNull(pool, nameof(pool));
+
+ using RequestContent content = pool.ToRequestContent();
+ RequestContext context = FromCancellationToken(cancellationToken);
+ Response response = await UpdatePoolAsync(poolId, content, timeOutInSeconds, ocpdate, requestConditions, context).ConfigureAwait(false);
+ return response;
+ }
+
+ /// [Protocol Method] Updates the properties of the specified Pool.
+ /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+ /// The ID of the Pool to update.
+ /// The pool properties to update.
+ /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.
+ /// The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly.
+ /// The content to send as the request conditions of the request.
+ /// The cancellation token to use.
+ /// or is null.
+ /// is an empty string, and was expected to be non-empty.
+ /// Service returned a non-success status code.
+ /// The response returned from the service.
+ public virtual Response UpdatePool(string poolId, BatchPoolUpdateContent pool, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default)
+ {
+ Argument.AssertNotNullOrEmpty(poolId, nameof(poolId));
+ Argument.AssertNotNull(pool, nameof(pool));
+
+ using RequestContent content = pool.ToRequestContent();
+ RequestContext context = FromCancellationToken(cancellationToken);
+ Response response = UpdatePool(poolId, content, timeOutInSeconds, ocpdate, requestConditions, context);
+ return response;
+ }
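+ // Usage note (hypothetical sketch): patching a pool with the convenience overload
+ // above. The Metadata collection and MetadataItem constructor are assumed model
+ // members; IDs and values are placeholders.
+ //
+ //   var update = new BatchPoolUpdateContent();
+ //   update.Metadata.Add(new MetadataItem("department", "finance"));
+ //   client.UpdatePool("pool-1", update);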
+
+        /// <summary>
+        /// [Protocol Method] Updates the properties of the specified Job.
+        /// <list type="bullet">
+        /// <item>
+        /// <description>
+        /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+        /// </description>
+        /// </item>
+        /// </list>
+        /// </summary>
+        /// <param name="jobId"> The ID of the Job whose properties you want to update. </param>
+        /// <param name="job"> The options to use for updating the Job. </param>
+        /// <param name="timeOutInSeconds"> The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead. </param>
+        /// <param name="ocpdate">
+        /// The time the request was issued. Client libraries typically set this to the
+        /// current system clock time; set it explicitly if you are calling the REST API
+        /// directly.
+        /// </param>
+        /// <param name="requestConditions"> The content to send as the request conditions of the request. </param>
+        /// <param name="cancellationToken"> The cancellation token to use. </param>
+        /// <exception cref="ArgumentNullException"> <paramref name="jobId"/> or <paramref name="job"/> is null. </exception>
+        /// <exception cref="ArgumentException"> <paramref name="jobId"/> is an empty string, and was expected to be non-empty. </exception>
+        /// <exception cref="RequestFailedException"> Service returned a non-success status code. </exception>
+        /// <returns> The response returned from the service. </returns>
+        public virtual async Task<Response> UpdateJobAsync(string jobId, BatchJobUpdateContent job, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default)
+        {
+            Argument.AssertNotNullOrEmpty(jobId, nameof(jobId));
+            Argument.AssertNotNull(job, nameof(job));
+
+            using RequestContent content = job.ToRequestContent();
+            RequestContext context = FromCancellationToken(cancellationToken);
+            Response response = await UpdateJobAsync(jobId, content, timeOutInSeconds, ocpdate, requestConditions, context).ConfigureAwait(false);
+            return response;
+        }
+
+        /// <summary>
+        /// [Protocol Method] Updates the properties of the specified Job.
+        /// <list type="bullet">
+        /// <item>
+        /// <description>
+        /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+        /// </description>
+        /// </item>
+        /// </list>
+        /// </summary>
+        /// <param name="jobId"> The ID of the Job whose properties you want to update. </param>
+        /// <param name="job"> The options to use for updating the Job. </param>
+        /// <param name="timeOutInSeconds"> The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead. </param>
+        /// <param name="ocpdate">
+        /// The time the request was issued. Client libraries typically set this to the
+        /// current system clock time; set it explicitly if you are calling the REST API
+        /// directly.
+        /// </param>
+        /// <param name="requestConditions"> The content to send as the request conditions of the request. </param>
+        /// <param name="cancellationToken"> The cancellation token to use. </param>
+        /// <exception cref="ArgumentNullException"> <paramref name="jobId"/> or <paramref name="job"/> is null. </exception>
+        /// <exception cref="ArgumentException"> <paramref name="jobId"/> is an empty string, and was expected to be non-empty. </exception>
+        /// <exception cref="RequestFailedException"> Service returned a non-success status code. </exception>
+        /// <returns> The response returned from the service. </returns>
+        public virtual Response UpdateJob(string jobId, BatchJobUpdateContent job, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default)
+        {
+            Argument.AssertNotNullOrEmpty(jobId, nameof(jobId));
+            Argument.AssertNotNull(job, nameof(job));
+
+            using RequestContent content = job.ToRequestContent();
+            RequestContext context = FromCancellationToken(cancellationToken);
+            Response response = UpdateJob(jobId, content, timeOutInSeconds, ocpdate, requestConditions, context);
+            return response;
+        }
+
+        /// <summary>
+        /// [Protocol Method] Updates the properties of the specified Job Schedule.
+        /// <list type="bullet">
+        /// <item>
+        /// <description>
+        /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+        /// </description>
+        /// </item>
+        /// </list>
+        /// </summary>
+        /// <param name="jobScheduleId"> The ID of the Job Schedule to update. </param>
+        /// <param name="jobSchedule"> The options to use for updating the Job Schedule. </param>
+        /// <param name="timeOutInSeconds"> The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead. </param>
+        /// <param name="ocpdate">
+        /// The time the request was issued. Client libraries typically set this to the
+        /// current system clock time; set it explicitly if you are calling the REST API
+        /// directly.
+        /// </param>
+        /// <param name="requestConditions"> The content to send as the request conditions of the request. </param>
+        /// <param name="cancellationToken"> The cancellation token to use. </param>
+        /// <exception cref="ArgumentNullException"> <paramref name="jobScheduleId"/> or <paramref name="jobSchedule"/> is null. </exception>
+        /// <exception cref="ArgumentException"> <paramref name="jobScheduleId"/> is an empty string, and was expected to be non-empty. </exception>
+        /// <exception cref="RequestFailedException"> Service returned a non-success status code. </exception>
+        /// <returns> The response returned from the service. </returns>
+        public virtual async Task<Response> UpdateJobScheduleAsync(string jobScheduleId, BatchJobScheduleUpdateContent jobSchedule, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default)
+        {
+            Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId));
+            Argument.AssertNotNull(jobSchedule, nameof(jobSchedule));
+
+            using RequestContent content = jobSchedule.ToRequestContent();
+            RequestContext context = FromCancellationToken(cancellationToken);
+            Response response = await UpdateJobScheduleAsync(jobScheduleId, content, timeOutInSeconds, ocpdate, requestConditions, context).ConfigureAwait(false);
+            return response;
+        }
+
+        /// <summary>
+        /// [Protocol Method] Updates the properties of the specified Job Schedule.
+        /// <list type="bullet">
+        /// <item>
+        /// <description>
+        /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+        /// </description>
+        /// </item>
+        /// </list>
+        /// </summary>
+        /// <param name="jobScheduleId"> The ID of the Job Schedule to update. </param>
+        /// <param name="jobSchedule"> The options to use for updating the Job Schedule. </param>
+        /// <param name="timeOutInSeconds"> The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead. </param>
+        /// <param name="ocpdate">
+        /// The time the request was issued. Client libraries typically set this to the
+        /// current system clock time; set it explicitly if you are calling the REST API
+        /// directly.
+        /// </param>
+        /// <param name="requestConditions"> The content to send as the request conditions of the request. </param>
+        /// <param name="cancellationToken"> The cancellation token to use. </param>
+        /// <exception cref="ArgumentNullException"> <paramref name="jobScheduleId"/> or <paramref name="jobSchedule"/> is null. </exception>
+        /// <exception cref="ArgumentException"> <paramref name="jobScheduleId"/> is an empty string, and was expected to be non-empty. </exception>
+        /// <exception cref="RequestFailedException"> Service returned a non-success status code. </exception>
+        /// <returns> The response returned from the service. </returns>
+        public virtual Response UpdateJobSchedule(string jobScheduleId, BatchJobScheduleUpdateContent jobSchedule, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default)
+        {
+            Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId));
+            Argument.AssertNotNull(jobSchedule, nameof(jobSchedule));
+
+            using RequestContent content = jobSchedule.ToRequestContent();
+            RequestContext context = FromCancellationToken(cancellationToken);
+            Response response = UpdateJobSchedule(jobScheduleId, content, timeOutInSeconds, ocpdate, requestConditions, context);
+            return response;
+        }
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/src/Custom/BatchNamedKeyCredentialPolicy.cs b/sdk/batch/Azure.Compute.Batch/src/Custom/BatchNamedKeyCredentialPolicy.cs
new file mode 100644
index 0000000000000..715926c3225a2
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/src/Custom/BatchNamedKeyCredentialPolicy.cs
@@ -0,0 +1,216 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.Globalization;
+using System.Security.Cryptography;
+using System.Linq;
+using System.Net.Http.Headers;
+using System.Text;
+using Azure.Core.Pipeline;
+using Azure.Core;
+using System.Collections.Specialized;
+using System.Threading.Tasks;
+
+namespace Azure.Compute.Batch.Custom
+{
+    /// <summary>
+    /// Pipeline policy that authenticates requests with a Batch account shared key.
+    /// </summary>
+    internal class BatchNamedKeyCredentialPolicy : HttpPipelineSynchronousPolicy
+    {
+        /// <summary>
+        /// Whether to always add the ocp-date header.
+        /// </summary>
+        private const bool IncludeXMsDate = true;
+
+        /// <summary>
+        /// Batch Account name.
+        /// </summary>
+        private string AccountName;
+
+        /// <summary>
+        /// Batch Shared Key (the base64-decoded account key).
+        /// </summary>
+        private byte[] AccountKey;
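+
+        // Overview of the shared key scheme implemented by this policy (a summary of the
+        // code below, not normative service documentation). OnSendingRequest stamps an
+        // ocp-date header and builds a string-to-sign of the form:
+        //
+        //   VERB \n
+        //   Content-Encoding \n Content-Language \n Content-Length \n Content-MD5 \n
+        //   Content-Type \n (empty date slot; ocp-date is covered by the ocp-* headers) \n
+        //   If-Modified-Since \n If-Match \n If-None-Match \n If-Unmodified-Since \n Range \n
+        //   lowercased, sorted "ocp-*" headers as name:value \n
+        //   /{account-name}{resource-path} [\n name:value per sorted query parameter]
+        //
+        // The signature is HMAC-SHA256 over this string with the base64-decoded account
+        // key, sent as "Authorization: SharedKey {account-name}:{signature}".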
+
+        /// <summary>
+        /// Create a new BatchNamedKeyCredentialPolicy.
+        /// </summary>
+        /// <param name="credentials"> The AzureNamedKeyCredential used to authenticate requests. </param>
+        public BatchNamedKeyCredentialPolicy(AzureNamedKeyCredential credentials)
+        {
+            var (name, key) = credentials;
+            AccountName = name;
+            SetAccountKey(key);
+        }
+
+        /// <summary>
+        /// Update the Batch Account's access key. This is intended to be used
+        /// when you've regenerated your Batch Account's access keys and want
+        /// to update long-lived clients.
+        /// </summary>
+        /// <param name="accountKey"> A Batch Account access key. </param>
+        public void SetAccountKey(string accountKey) =>
+            AccountKey = Convert.FromBase64String(accountKey);
+
+        /// <summary>
+        /// Sign the request using the shared key credentials.
+        /// </summary>
+        /// <param name="message"> The message with the request to sign. </param>
+        public override void OnSendingRequest(HttpMessage message)
+        {
+            base.OnSendingRequest(message);
+
+            // Add an ocp-date header
+            if (IncludeXMsDate)
+            {
+                var date = DateTimeOffset.UtcNow.ToString("r", CultureInfo.InvariantCulture);
+                message.Request.Headers.SetValue(Constants.HeaderNames.Date, date);
+            }
+
+            var stringToSign = BuildStringToSign(message);
+            var signature = ComputeSasSignature(stringToSign);
+
+            var key = new AuthenticationHeaderValue(Constants.HeaderNames.SharedKey, AccountName + ":" + signature).ToString();
+            message.Request.Headers.SetValue(Constants.HeaderNames.Authorization, key);
+        }
+
+        // If you change this method, make sure live tests are passing before merging PR.
+        private string BuildStringToSign(HttpMessage message)
+        {
+            // https://docs.microsoft.com/en-us/rest/api/Batchservices/authorize-with-shared-key
+
+            message.Request.Headers.TryGetValue(Constants.HeaderNames.ContentEncoding, out var contentEncoding);
+            message.Request.Headers.TryGetValue(Constants.HeaderNames.ContentLanguage, out var contentLanguage);
+            message.Request.Headers.TryGetValue(Constants.HeaderNames.ContentMD5, out var contentMD5);
+            message.Request.Headers.TryGetValue(Constants.HeaderNames.ContentType, out var contentType);
+            message.Request.Headers.TryGetValue(Constants.HeaderNames.IfModifiedSince, out var ifModifiedSince);
+            message.Request.Headers.TryGetValue(Constants.HeaderNames.IfMatch, out var ifMatch);
+            message.Request.Headers.TryGetValue(Constants.HeaderNames.IfNoneMatch, out var ifNoneMatch);
+            message.Request.Headers.TryGetValue(Constants.HeaderNames.IfUnmodifiedSince, out var ifUnmodifiedSince);
+            message.Request.Headers.TryGetValue(Constants.HeaderNames.Range, out var range);
+
+            string contentLengthString = string.Empty;
+
+            if (message.Request.Content != null && message.Request.Content.TryComputeLength(out long contentLength))
+            {
+                contentLengthString = contentLength.ToString(CultureInfo.InvariantCulture);
+            }
+            var uri = message.Request.Uri.ToUri();
+
+            var stringBuilder = new StringBuilder(uri.AbsolutePath.Length + 64);
+            stringBuilder.Append(message.Request.Method.ToString().ToUpperInvariant()).Append('\n');
+            stringBuilder.Append(contentEncoding ?? "").Append('\n');
+            stringBuilder.Append(contentLanguage ?? "").Append('\n');
+            stringBuilder.Append(contentLengthString == "0" ? "" : contentLengthString ?? "").Append('\n');
+            stringBuilder.Append(contentMD5 ?? "").Append('\n'); // TODO: fix base64 value
+            stringBuilder.Append(contentType ?? "").Append('\n');
+            stringBuilder.Append('\n'); // Empty date slot, because the ocp-date header is signed instead (see the REST docs above).
+            stringBuilder.Append(ifModifiedSince ?? "").Append('\n');
+            stringBuilder.Append(ifMatch ?? "").Append('\n');
+            stringBuilder.Append(ifNoneMatch ?? "").Append('\n');
+            stringBuilder.Append(ifUnmodifiedSince ?? "").Append('\n');
+            stringBuilder.Append(range ?? "").Append('\n');
+            BuildCanonicalizedHeaders(stringBuilder, message);
+            BuildCanonicalizedResource(stringBuilder, uri);
+            return stringBuilder.ToString();
+        }
+
+        // If you change this method, make sure live tests are passing before merging PR.
+        private void BuildCanonicalizedHeaders(StringBuilder stringBuilder, HttpMessage message)
+        {
+            // Grab all the "ocp-*" headers, trim whitespace, lowercase, sort,
+            // and combine them with their values (separated by a colon).
+            var headers = new List<HttpHeader>();
+            foreach (var header in message.Request.Headers)
+            {
+                if (header.Name.StartsWith(Constants.HeaderNames.OCPPrefix, StringComparison.OrdinalIgnoreCase))
+                {
+                    headers.Add(new HttpHeader(header.Name.ToLowerInvariant(), header.Value));
+                }
+            }
+
+            headers.Sort(static (x, y) => string.CompareOrdinal(x.Name, y.Name));
+
+            foreach (var header in headers)
+            {
+                stringBuilder
+                    .Append(header.Name)
+                    .Append(':')
+                    .Append(header.Value)
+                    .Append('\n');
+            }
+        }
+
+        // If you change this method, make sure live tests are passing before merging PR.
+        private void BuildCanonicalizedResource(StringBuilder stringBuilder, Uri resource)
+        {
+            // https://docs.microsoft.com/en-us/rest/api/Batchservices/authentication-for-the-azure-Batch-services
+            stringBuilder.Append('/');
+            stringBuilder.Append(AccountName);
+            if (resource.AbsolutePath.Length > 0)
+            {
+                // Any portion of the CanonicalizedResource string that is derived from
+                // the resource's URI should be encoded exactly as it is in the URI.
+                // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx
+                stringBuilder.Append(resource.AbsolutePath);
+            }
+            else
+            {
+                // A slash is required to indicate the root path.
+                stringBuilder.Append('/');
+            }
+            string queryString = resource.Query;
+            var namedValueCollection = System.Web.HttpUtility.ParseQueryString(queryString);
+            IDictionary<string, string> parameters = ToDictionary(namedValueCollection); // Returns URL decoded values
+            if (parameters.Count > 0)
+            {
+                foreach (var name in parameters.Keys.OrderBy(key => key, StringComparer.Ordinal))
+                {
+#pragma warning disable CA1308 // Normalize strings to uppercase
+                    stringBuilder.Append('\n').Append(name.ToLowerInvariant()).Append(':').Append(parameters[name]);
+#pragma warning restore CA1308 // Normalize strings to uppercase
+                }
+            }
+        }
+
+        /// <summary>
+        /// Generates a base-64 hash signature string for an HTTP request or
+        /// for a SAS.
+        /// </summary>
+        /// <param name="message"> The message to sign. </param>
+        /// <returns> The signed message. </returns>
+        private string ComputeSasSignature(string message)
+        {
+#if NET6_0_OR_GREATER
+            return Convert.ToBase64String(HMACSHA256.HashData(AccountKey, Encoding.UTF8.GetBytes(message)));
+#else
+            return Convert.ToBase64String(new HMACSHA256(AccountKey).ComputeHash(Encoding.UTF8.GetBytes(message)));
+#endif
+        }
+
+        /// <summary>
+        /// A NameValueCollection extension method that converts to a dictionary.
+        /// </summary>
+        /// <param name="collection"> The collection to act on. </param>
+ /// @this as an IDictionary<string,object> + private IDictionary ToDictionary(NameValueCollection collection) + { + var dict = new Dictionary(); + + if (collection != null) + { + foreach (string key in collection.AllKeys) + { + dict.Add(key, collection[key]); + } + } + + return dict; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Custom/Constants.cs b/sdk/batch/Azure.Compute.Batch/src/Custom/Constants.cs new file mode 100644 index 0000000000000..83d8fb28c5d65 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Custom/Constants.cs @@ -0,0 +1,36 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +using System; +using System.Collections.Generic; +using System.Text; + +namespace Azure.Compute.Batch.Custom +{ + internal static class Constants + { + /// + /// Header Name constant values. + /// + internal static class HeaderNames + { + public const string OCPPrefix = "ocp-"; + public const string Date = "ocp-date"; + public const string SharedKey = "SharedKey"; + public const string Authorization = "Authorization"; + public const string ContentEncoding = "Content-Encoding"; + public const string ContentLanguage = "Content-Language"; + public const string ContentLength = "Content-Length"; + public const string ContentMD5 = "Content-MD5"; + public const string ContentType = "Content-Type"; + public const string IfModifiedSince = "If-Modified-Since"; + public const string IfMatch = "If-Match"; + public const string IfNoneMatch = "If-None-Match"; + public const string IfUnmodifiedSince = "If-Unmodified-Since"; + public const string Range = "Range"; + public const string ContentRange = "Content-Range"; + public const string LastModified = "Last-Modified"; + public const string ETag = "ETag"; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Custom/Models/BatchFileProperties.cs b/sdk/batch/Azure.Compute.Batch/src/Custom/Models/BatchFileProperties.cs new file mode 100644 index 0000000000000..6cc2ef3dcbfae --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Custom/Models/BatchFileProperties.cs @@ -0,0 +1,61 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; +using System.Globalization; +using System.Text.Json; + +namespace Azure.Compute.Batch +{ + /// + /// Collection of header values that describe Batch File properties + /// + public partial class BatchFileProperties + { + internal BatchFileProperties(bool batchFileIsDirectory, string batchFileMode, string batchFileUrl, DateTime creationTime) + { + BatchFileIsDirectory = batchFileIsDirectory; + BatchFileMode = batchFileMode; + BatchFileUrl = batchFileUrl; + CreationTime = creationTime; + } + + /// Whether the object represents a directory. + public bool BatchFileIsDirectory { get; } + + /// The file mode attribute in octal format. + public string BatchFileMode { get; } + + /// The URL of the file. + public string BatchFileUrl { get; } + + /// The file creation time. + public DateTime CreationTime { get; } + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. 
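+
+        // Note: the properties above are read from the ocp-* response headers of a file
+        // properties request, as parsed below; the header values shown here are
+        // illustrative only.
+        //
+        //   ocp-batch-file-isdirectory: false
+        //   ocp-batch-file-mode: 0644
+        //   ocp-batch-file-url: https://{account}.{region}.batch.azure.com/pools/{pool}/nodes/{node}/files/{path}
+        //   ocp-creation-time: Mon, 01 Jan 2024 00:00:00 GMT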
+ internal static BatchFileProperties FromResponse(Response response) + { + string batchFileIsDirectoryStr = ""; + bool batchFileIsDirectory = false; + string batchFileMode = ""; + string batchFileUrl = ""; + string creationTimeStr = ""; + + response.Headers.TryGetValue("ocp-creation-time", out creationTimeStr); + response.Headers.TryGetValue("ocp-batch-file-isdirectory", out batchFileIsDirectoryStr); + response.Headers.TryGetValue("ocp-batch-file-url", out batchFileUrl); + response.Headers.TryGetValue("ocp-batch-file-mode", out batchFileMode); + + Boolean.TryParse(batchFileIsDirectoryStr, out batchFileIsDirectory); + DateTime creationTime = DateTime.Parse(creationTimeStr, CultureInfo.InvariantCulture, DateTimeStyles.AssumeUniversal); + + return new BatchFileProperties(batchFileIsDirectory, batchFileMode, batchFileUrl, creationTime); + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Custom/Models/BatchJobScheduleUpdateContent.cs b/sdk/batch/Azure.Compute.Batch/src/Custom/Models/BatchJobScheduleUpdateContent.cs new file mode 100644 index 0000000000000..56a02005f7197 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Custom/Models/BatchJobScheduleUpdateContent.cs @@ -0,0 +1,17 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Parameters for updating an Azure Batch Job Schedule. + public partial class BatchJobScheduleUpdateContent + { + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Custom/Models/BatchJobUpdateContent.cs b/sdk/batch/Azure.Compute.Batch/src/Custom/Models/BatchJobUpdateContent.cs new file mode 100644 index 0000000000000..0736f1d9a894d --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Custom/Models/BatchJobUpdateContent.cs @@ -0,0 +1,17 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Parameters for updating an Azure Batch Job. + public partial class BatchJobUpdateContent + { + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Custom/Models/BatchPoolReplaceContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Custom/Models/BatchPoolReplaceContent.Serialization.cs new file mode 100644 index 0000000000000..29ff9f976cb7c --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Custom/Models/BatchPoolReplaceContent.Serialization.cs @@ -0,0 +1,154 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchPoolReplaceContent : IUtf8JsonSerializable, IJsonModel + { + void global::Azure.Core.IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void global::System.ClientModel.Primitives.IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchPoolReplaceContent)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(StartTask)) + { + writer.WritePropertyName("startTask"u8); + writer.WriteObjectValue(StartTask, options); + } + writer.WritePropertyName("applicationPackageReferences"u8); + writer.WriteStartArray(); + foreach (var item in ApplicationPackageReferences) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + writer.WritePropertyName("metadata"u8); + writer.WriteStartArray(); + foreach (var item in Metadata) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + writer.WritePropertyName("certificateReferences"u8); + writer.WriteStartArray(); + writer.WriteEndArray(); + if (Optional.IsDefined(TargetNodeCommunicationMode)) + { + writer.WritePropertyName("targetNodeCommunicationMode"u8); + writer.WriteStringValue(TargetNodeCommunicationMode.Value.ToString()); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + + internal static BatchPoolReplaceContent DeserializeBatchPoolReplaceContent(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + BatchStartTask startTask = default; + IList applicationPackageReferences = default; + IList metadata = default; + BatchNodeCommunicationMode? targetNodeCommunicationMode = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("startTask"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + startTask = BatchStartTask.DeserializeBatchStartTask(property.Value, options); + continue; + } + if (property.NameEquals("applicationPackageReferences"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(BatchApplicationPackageReference.DeserializeBatchApplicationPackageReference(item, options)); + } + applicationPackageReferences = array; + continue; + } + if (property.NameEquals("metadata"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(MetadataItem.DeserializeMetadataItem(item, options)); + } + metadata = array; + continue; + } + if (property.NameEquals("targetNodeCommunicationMode"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + targetNodeCommunicationMode = new BatchNodeCommunicationMode(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchPoolReplaceContent(startTask, applicationPackageReferences, metadata, targetNodeCommunicationMode, serializedAdditionalRawData); + } + + /// Deserializes the model from a raw response. 
+ /// The response to deserialize the model from. + internal static BatchPoolReplaceContent FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchPoolReplaceContent(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Custom/Models/BatchPoolReplaceContent.cs b/sdk/batch/Azure.Compute.Batch/src/Custom/Models/BatchPoolReplaceContent.cs new file mode 100644 index 0000000000000..e9b5ad886fb77 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Custom/Models/BatchPoolReplaceContent.cs @@ -0,0 +1,18 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; +using System.Linq; + +namespace Azure.Compute.Batch +{ + /// Parameters for replacing properties on an Azure Batch Pool. + public partial class BatchPoolReplaceContent + { + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Custom/Models/BatchPoolUpdateContent.cs b/sdk/batch/Azure.Compute.Batch/src/Custom/Models/BatchPoolUpdateContent.cs new file mode 100644 index 0000000000000..de446aa7a6265 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Custom/Models/BatchPoolUpdateContent.cs @@ -0,0 +1,17 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Parameters for updating an Azure Batch Pool. + public partial class BatchPoolUpdateContent + { + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/AccessScope.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/AccessScope.cs new file mode 100644 index 0000000000000..65c9771cf6837 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/AccessScope.cs @@ -0,0 +1,48 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// AccessScope enums. + public readonly partial struct AccessScope : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public AccessScope(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string JobValue = "job"; + + /// Grants access to perform all operations on the Job containing the Task. + public static AccessScope Job { get; } = new AccessScope(JobValue); + /// Determines if two values are the same. + public static bool operator ==(AccessScope left, AccessScope right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(AccessScope left, AccessScope right) => !left.Equals(right); + /// Converts a string to a . 
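+
+        // AccessScope follows the Azure SDK "extensible enum" pattern: a readonly struct
+        // wrapping a string value, so values unknown to this library version round-trip
+        // without throwing. The implicit conversion below allows, e.g., `AccessScope scope = "job";`.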
+ public static implicit operator AccessScope(string value) => new AccessScope(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is AccessScope other && Equals(other); + /// + public bool Equals(AccessScope other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/AffinityInfo.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/AffinityInfo.Serialization.cs new file mode 100644 index 0000000000000..f2b549e739c0b --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/AffinityInfo.Serialization.cs @@ -0,0 +1,135 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class AffinityInfo : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(AffinityInfo)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("affinityId"u8); + writer.WriteStringValue(AffinityId); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + AffinityInfo IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(AffinityInfo)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeAffinityInfo(document.RootElement, options); + } + + internal static AffinityInfo DeserializeAffinityInfo(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string affinityId = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("affinityId"u8)) + { + affinityId = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new AffinityInfo(affinityId, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(AffinityInfo)} does not support writing '{options.Format}' format."); + } + } + + AffinityInfo IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeAffinityInfo(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(AffinityInfo)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static AffinityInfo FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeAffinityInfo(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/AffinityInfo.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/AffinityInfo.cs new file mode 100644 index 0000000000000..340f6ac3f4eb0 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/AffinityInfo.cs @@ -0,0 +1,78 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// + /// A locality hint that can be used by the Batch service to select a Compute Node + /// on which to start a Task. + /// + public partial class AffinityInfo + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . 
+ /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// An opaque string representing the location of a Compute Node or a Task that has run previously. You can pass the affinityId of a Node to indicate that this Task needs to run on that Compute Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere. + /// is null. + public AffinityInfo(string affinityId) + { + Argument.AssertNotNull(affinityId, nameof(affinityId)); + + AffinityId = affinityId; + } + + /// Initializes a new instance of . + /// An opaque string representing the location of a Compute Node or a Task that has run previously. You can pass the affinityId of a Node to indicate that this Task needs to run on that Compute Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere. + /// Keeps track of any properties unknown to the library. + internal AffinityInfo(string affinityId, IDictionary serializedAdditionalRawData) + { + AffinityId = affinityId; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal AffinityInfo() + { + } + + /// An opaque string representing the location of a Compute Node or a Task that has run previously. You can pass the affinityId of a Node to indicate that this Task needs to run on that Compute Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere. + public string AffinityId { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/AllocationState.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/AllocationState.cs new file mode 100644 index 0000000000000..45dba8184dc23 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/AllocationState.cs @@ -0,0 +1,54 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// AllocationState enums. + public readonly partial struct AllocationState : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public AllocationState(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string SteadyValue = "steady"; + private const string ResizingValue = "resizing"; + private const string StoppingValue = "stopping"; + + /// The Pool is not resizing. There are no changes to the number of Compute Nodes in the Pool in progress. A Pool enters this state when it is created and when no operations are being performed on the Pool to change the number of Compute Nodes. 
+ public static AllocationState Steady { get; } = new AllocationState(SteadyValue); + /// The Pool is resizing; that is, Compute Nodes are being added to or removed from the Pool. + public static AllocationState Resizing { get; } = new AllocationState(ResizingValue); + /// The Pool was resizing, but the user has requested that the resize be stopped, but the stop request has not yet been completed. + public static AllocationState Stopping { get; } = new AllocationState(StoppingValue); + /// Determines if two values are the same. + public static bool operator ==(AllocationState left, AllocationState right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(AllocationState left, AllocationState right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator AllocationState(string value) => new AllocationState(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is AllocationState other && Equals(other); + /// + public bool Equals(AllocationState other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/AuthenticationTokenSettings.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/AuthenticationTokenSettings.Serialization.cs new file mode 100644 index 0000000000000..418b0c9f4ada9 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/AuthenticationTokenSettings.Serialization.cs @@ -0,0 +1,152 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class AuthenticationTokenSettings : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(AuthenticationTokenSettings)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsCollectionDefined(Access)) + { + writer.WritePropertyName("access"u8); + writer.WriteStartArray(); + foreach (var item in Access) + { + writer.WriteStringValue(item.ToString()); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + AuthenticationTokenSettings IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(AuthenticationTokenSettings)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeAuthenticationTokenSettings(document.RootElement, options); + } + + internal static AuthenticationTokenSettings DeserializeAuthenticationTokenSettings(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + IList access = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("access"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(new AccessScope(item.GetString())); + } + access = array; + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new AuthenticationTokenSettings(access ?? new ChangeTrackingList(), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(AuthenticationTokenSettings)} does not support writing '{options.Format}' format."); + } + } + + AuthenticationTokenSettings IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeAuthenticationTokenSettings(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(AuthenticationTokenSettings)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static AuthenticationTokenSettings FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeAuthenticationTokenSettings(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/AuthenticationTokenSettings.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/AuthenticationTokenSettings.cs new file mode 100644 index 0000000000000..400d876d185fa --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/AuthenticationTokenSettings.cs @@ -0,0 +1,69 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// + /// The settings for an authentication token that the Task can use to perform Batch + /// service operations. + /// + public partial class AuthenticationTokenSettings + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public AuthenticationTokenSettings() + { + Access = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// The Batch resources to which the token grants access. The authentication token grants access to a limited set of Batch service operations. Currently the only supported value for the access property is 'job', which grants access to all operations related to the Job which contains the Task. + /// Keeps track of any properties unknown to the library. + internal AuthenticationTokenSettings(IList access, IDictionary serializedAdditionalRawData) + { + Access = access; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The Batch resources to which the token grants access. The authentication token grants access to a limited set of Batch service operations. Currently the only supported value for the access property is 'job', which grants access to all operations related to the Job which contains the Task. + public IList Access { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/AutoScaleRun.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/AutoScaleRun.Serialization.cs new file mode 100644 index 0000000000000..48bc6352dae69 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/AutoScaleRun.Serialization.cs @@ -0,0 +1,161 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class AutoScaleRun : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(AutoScaleRun)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("timestamp"u8); + writer.WriteStringValue(Timestamp, "O"); + if (Optional.IsDefined(Results)) + { + writer.WritePropertyName("results"u8); + writer.WriteStringValue(Results); + } + if (Optional.IsDefined(Error)) + { + writer.WritePropertyName("error"u8); + writer.WriteObjectValue(Error, options); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + AutoScaleRun IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(AutoScaleRun)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeAutoScaleRun(document.RootElement, options); + } + + internal static AutoScaleRun DeserializeAutoScaleRun(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + DateTimeOffset timestamp = default; + string results = default; + AutoScaleRunError error = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("timestamp"u8)) + { + timestamp = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("results"u8)) + { + results = property.Value.GetString(); + continue; + } + if (property.NameEquals("error"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + error = AutoScaleRunError.DeserializeAutoScaleRunError(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new AutoScaleRun(timestamp, results, error, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(AutoScaleRun)} does not support writing '{options.Format}' format."); + } + } + + AutoScaleRun IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeAutoScaleRun(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(AutoScaleRun)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static AutoScaleRun FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeAutoScaleRun(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/AutoScaleRun.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/AutoScaleRun.cs new file mode 100644 index 0000000000000..663ba502f78fc --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/AutoScaleRun.cs @@ -0,0 +1,80 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// The results and errors from an execution of a Pool autoscale formula. + public partial class AutoScaleRun + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The time at which the autoscale formula was last evaluated. + internal AutoScaleRun(DateTimeOffset timestamp) + { + Timestamp = timestamp; + } + + /// Initializes a new instance of . + /// The time at which the autoscale formula was last evaluated. + /// The final values of all variables used in the evaluation of the autoscale formula. Each variable value is returned in the form $variable=value, and variables are separated by semicolons. + /// Details of the error encountered evaluating the autoscale formula on the Pool, if the evaluation was unsuccessful. + /// Keeps track of any properties unknown to the library. + internal AutoScaleRun(DateTimeOffset timestamp, string results, AutoScaleRunError error, IDictionary serializedAdditionalRawData) + { + Timestamp = timestamp; + Results = results; + Error = error; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal AutoScaleRun() + { + } + + /// The time at which the autoscale formula was last evaluated. 
+ public DateTimeOffset Timestamp { get; } + /// The final values of all variables used in the evaluation of the autoscale formula. Each variable value is returned in the form $variable=value, and variables are separated by semicolons. + public string Results { get; } + /// Details of the error encountered evaluating the autoscale formula on the Pool, if the evaluation was unsuccessful. + public AutoScaleRunError Error { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/AutoScaleRunError.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/AutoScaleRunError.Serialization.cs new file mode 100644 index 0000000000000..d8cb62f788ff7 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/AutoScaleRunError.Serialization.cs @@ -0,0 +1,174 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class AutoScaleRunError : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(AutoScaleRunError)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(Code)) + { + writer.WritePropertyName("code"u8); + writer.WriteStringValue(Code); + } + if (Optional.IsDefined(Message)) + { + writer.WritePropertyName("message"u8); + writer.WriteStringValue(Message); + } + if (Optional.IsCollectionDefined(Values)) + { + writer.WritePropertyName("values"u8); + writer.WriteStartArray(); + foreach (var item in Values) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + AutoScaleRunError IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(AutoScaleRunError)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeAutoScaleRunError(document.RootElement, options); + } + + internal static AutoScaleRunError DeserializeAutoScaleRunError(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string code = default; + string message = default; + IReadOnlyList values = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("code"u8)) + { + code = property.Value.GetString(); + continue; + } + if (property.NameEquals("message"u8)) + { + message = property.Value.GetString(); + continue; + } + if (property.NameEquals("values"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(NameValuePair.DeserializeNameValuePair(item, options)); + } + values = array; + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new AutoScaleRunError(code, message, values ?? new ChangeTrackingList(), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(AutoScaleRunError)} does not support writing '{options.Format}' format."); + } + } + + AutoScaleRunError IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeAutoScaleRunError(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(AutoScaleRunError)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static AutoScaleRunError FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeAutoScaleRunError(document.RootElement); + } + + /// Convert into a . 
+ internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/AutoScaleRunError.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/AutoScaleRunError.cs new file mode 100644 index 0000000000000..6d52456551fbd --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/AutoScaleRunError.cs @@ -0,0 +1,74 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// An error that occurred when executing or evaluating a Pool autoscale formula. + public partial class AutoScaleRunError + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + internal AutoScaleRunError() + { + Values = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// An identifier for the autoscale error. Codes are invariant and are intended to be consumed programmatically. + /// A message describing the autoscale error, intended to be suitable for display in a user interface. + /// A list of additional error details related to the autoscale error. + /// Keeps track of any properties unknown to the library. + internal AutoScaleRunError(string code, string message, IReadOnlyList values, IDictionary serializedAdditionalRawData) + { + Code = code; + Message = message; + Values = values; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// An identifier for the autoscale error. Codes are invariant and are intended to be consumed programmatically. + public string Code { get; } + /// A message describing the autoscale error, intended to be suitable for display in a user interface. + public string Message { get; } + /// A list of additional error details related to the autoscale error. + public IReadOnlyList Values { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/AutoUserScope.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/AutoUserScope.cs new file mode 100644 index 0000000000000..be1e664f14147 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/AutoUserScope.cs @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// AutoUserScope enums. + public readonly partial struct AutoUserScope : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public AutoUserScope(string value) + { + _value = value ?? 
throw new ArgumentNullException(nameof(value)); + } + + private const string TaskValue = "task"; + private const string PoolValue = "pool"; + + /// Specifies that the service should create a new user for the Task. + public static AutoUserScope Task { get; } = new AutoUserScope(TaskValue); + /// Specifies that the Task runs as the common auto user Account which is created on every Compute Node in a Pool. + public static AutoUserScope Pool { get; } = new AutoUserScope(PoolValue); + /// Determines if two values are the same. + public static bool operator ==(AutoUserScope left, AutoUserScope right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(AutoUserScope left, AutoUserScope right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator AutoUserScope(string value) => new AutoUserScope(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is AutoUserScope other && Equals(other); + /// + public bool Equals(AutoUserScope other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/AutoUserSpecification.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/AutoUserSpecification.Serialization.cs new file mode 100644 index 0000000000000..17a5690247ce9 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/AutoUserSpecification.Serialization.cs @@ -0,0 +1,157 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class AutoUserSpecification : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(AutoUserSpecification)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(Scope)) + { + writer.WritePropertyName("scope"u8); + writer.WriteStringValue(Scope.Value.ToString()); + } + if (Optional.IsDefined(ElevationLevel)) + { + writer.WritePropertyName("elevationLevel"u8); + writer.WriteStringValue(ElevationLevel.Value.ToString()); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + AutoUserSpecification IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(AutoUserSpecification)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeAutoUserSpecification(document.RootElement, options); + } + + internal static AutoUserSpecification DeserializeAutoUserSpecification(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + AutoUserScope? scope = default; + ElevationLevel? elevationLevel = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("scope"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + scope = new AutoUserScope(property.Value.GetString()); + continue; + } + if (property.NameEquals("elevationLevel"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + elevationLevel = new ElevationLevel(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new AutoUserSpecification(scope, elevationLevel, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(AutoUserSpecification)} does not support writing '{options.Format}' format."); + } + } + + AutoUserSpecification IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeAutoUserSpecification(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(AutoUserSpecification)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static AutoUserSpecification FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeAutoUserSpecification(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/AutoUserSpecification.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/AutoUserSpecification.cs new file mode 100644 index 0000000000000..655ea827b2ca1 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/AutoUserSpecification.cs @@ -0,0 +1,69 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. 
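// ---- Illustrative annotation, not part of the generated source ----
// A minimal usage sketch for the AutoUserSpecification model defined below,
// assuming the generated ElevationLevel extensible enum exposes an Admin
// member alongside the documented default of nonAdmin.
var autoUser = new AutoUserSpecification
{
    Scope = AutoUserScope.Task,           // per-task user; the default scope is pool
    ElevationLevel = ElevationLevel.Admin // assumption: Admin is a member of ElevationLevel
};
// --------------------------------------------------------------------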
+// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Specifies the options for the auto user that runs an Azure Batch Task. + public partial class AutoUserSpecification + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public AutoUserSpecification() + { + } + + /// Initializes a new instance of . + /// The scope for the auto user. The default value is pool. If the pool is running Windows, a value of Task should be specified if stricter isolation between tasks is required, such as if the task mutates the registry in a way which could impact other tasks. + /// The elevation level of the auto user. The default value is nonAdmin. + /// Keeps track of any properties unknown to the library. + internal AutoUserSpecification(AutoUserScope? scope, ElevationLevel? elevationLevel, IDictionary serializedAdditionalRawData) + { + Scope = scope; + ElevationLevel = elevationLevel; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The scope for the auto user. The default value is pool. If the pool is running Windows, a value of Task should be specified if stricter isolation between tasks is required, such as if the task mutates the registry in a way which could impact other tasks. + public AutoUserScope? Scope { get; set; } + /// The elevation level of the auto user. The default value is nonAdmin. + public ElevationLevel? ElevationLevel { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/AutomaticOsUpgradePolicy.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/AutomaticOsUpgradePolicy.Serialization.cs new file mode 100644 index 0000000000000..1c83efe2f9a70 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/AutomaticOsUpgradePolicy.Serialization.cs @@ -0,0 +1,187 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class AutomaticOsUpgradePolicy : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(AutomaticOsUpgradePolicy)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(DisableAutomaticRollback)) + { + writer.WritePropertyName("disableAutomaticRollback"u8); + writer.WriteBooleanValue(DisableAutomaticRollback.Value); + } + if (Optional.IsDefined(EnableAutomaticOsUpgrade)) + { + writer.WritePropertyName("enableAutomaticOSUpgrade"u8); + writer.WriteBooleanValue(EnableAutomaticOsUpgrade.Value); + } + if (Optional.IsDefined(UseRollingUpgradePolicy)) + { + writer.WritePropertyName("useRollingUpgradePolicy"u8); + writer.WriteBooleanValue(UseRollingUpgradePolicy.Value); + } + if (Optional.IsDefined(OsRollingUpgradeDeferral)) + { + writer.WritePropertyName("osRollingUpgradeDeferral"u8); + writer.WriteBooleanValue(OsRollingUpgradeDeferral.Value); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + AutomaticOsUpgradePolicy IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(AutomaticOsUpgradePolicy)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeAutomaticOsUpgradePolicy(document.RootElement, options); + } + + internal static AutomaticOsUpgradePolicy DeserializeAutomaticOsUpgradePolicy(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + bool? disableAutomaticRollback = default; + bool? enableAutomaticOSUpgrade = default; + bool? useRollingUpgradePolicy = default; + bool? 
osRollingUpgradeDeferral = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("disableAutomaticRollback"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + disableAutomaticRollback = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("enableAutomaticOSUpgrade"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + enableAutomaticOSUpgrade = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("useRollingUpgradePolicy"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + useRollingUpgradePolicy = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("osRollingUpgradeDeferral"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + osRollingUpgradeDeferral = property.Value.GetBoolean(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new AutomaticOsUpgradePolicy(disableAutomaticRollback, enableAutomaticOSUpgrade, useRollingUpgradePolicy, osRollingUpgradeDeferral, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(AutomaticOsUpgradePolicy)} does not support writing '{options.Format}' format."); + } + } + + AutomaticOsUpgradePolicy IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeAutomaticOsUpgradePolicy(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(AutomaticOsUpgradePolicy)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static AutomaticOsUpgradePolicy FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeAutomaticOsUpgradePolicy(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/AutomaticOsUpgradePolicy.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/AutomaticOsUpgradePolicy.cs new file mode 100644 index 0000000000000..a55f474ba013a --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/AutomaticOsUpgradePolicy.cs @@ -0,0 +1,77 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
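// ---- Illustrative annotation, not part of the generated source ----
// A sketch of configuring the AutomaticOsUpgradePolicy defined below; the
// property names come from this file, and the values are only an example.
var upgradePolicy = new AutomaticOsUpgradePolicy
{
    EnableAutomaticOsUpgrade = true,  // apply new OS image versions automatically
    UseRollingUpgradePolicy = true,   // roll upgrades out in batches
    OsRollingUpgradeDeferral = true,  // defer upgrades on nodes running tasks
    DisableAutomaticRollback = false  // keep the image rollback feature available
};
// --------------------------------------------------------------------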
+ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// The configuration parameters used for performing automatic OS upgrade. + public partial class AutomaticOsUpgradePolicy + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public AutomaticOsUpgradePolicy() + { + } + + /// Initializes a new instance of . + /// Whether OS image rollback feature should be disabled. + /// Indicates whether OS upgrades should automatically be applied to scale set instances in a rolling fashion when a newer version of the OS image becomes available. <br /><br /> If this is set to true for Windows based pools, [WindowsConfiguration.enableAutomaticUpdates](https://learn.microsoft.com/en-us/rest/api/batchservice/pool/add?tabs=HTTP#windowsconfiguration) cannot be set to true. + /// Indicates whether rolling upgrade policy should be used during Auto OS Upgrade. Auto OS Upgrade will fallback to the default policy if no policy is defined on the VMSS. + /// Defer OS upgrades on the TVMs if they are running tasks. + /// Keeps track of any properties unknown to the library. + internal AutomaticOsUpgradePolicy(bool? disableAutomaticRollback, bool? enableAutomaticOsUpgrade, bool? useRollingUpgradePolicy, bool? osRollingUpgradeDeferral, IDictionary serializedAdditionalRawData) + { + DisableAutomaticRollback = disableAutomaticRollback; + EnableAutomaticOsUpgrade = enableAutomaticOsUpgrade; + UseRollingUpgradePolicy = useRollingUpgradePolicy; + OsRollingUpgradeDeferral = osRollingUpgradeDeferral; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Whether OS image rollback feature should be disabled. + public bool? DisableAutomaticRollback { get; set; } + /// Indicates whether OS upgrades should automatically be applied to scale set instances in a rolling fashion when a newer version of the OS image becomes available. <br /><br /> If this is set to true for Windows based pools, [WindowsConfiguration.enableAutomaticUpdates](https://learn.microsoft.com/en-us/rest/api/batchservice/pool/add?tabs=HTTP#windowsconfiguration) cannot be set to true. + public bool? EnableAutomaticOsUpgrade { get; set; } + /// Indicates whether rolling upgrade policy should be used during Auto OS Upgrade. Auto OS Upgrade will fallback to the default policy if no policy is defined on the VMSS. + public bool? UseRollingUpgradePolicy { get; set; } + /// Defer OS upgrades on the TVMs if they are running tasks. + public bool? 
OsRollingUpgradeDeferral { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/AzureBlobFileSystemConfiguration.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/AzureBlobFileSystemConfiguration.Serialization.cs new file mode 100644 index 0000000000000..d7170869ed0bb --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/AzureBlobFileSystemConfiguration.Serialization.cs @@ -0,0 +1,207 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class AzureBlobFileSystemConfiguration : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(AzureBlobFileSystemConfiguration)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("accountName"u8); + writer.WriteStringValue(AccountName); + writer.WritePropertyName("containerName"u8); + writer.WriteStringValue(ContainerName); + if (Optional.IsDefined(AccountKey)) + { + writer.WritePropertyName("accountKey"u8); + writer.WriteStringValue(AccountKey); + } + if (Optional.IsDefined(SasKey)) + { + writer.WritePropertyName("sasKey"u8); + writer.WriteStringValue(SasKey); + } + if (Optional.IsDefined(BlobfuseOptions)) + { + writer.WritePropertyName("blobfuseOptions"u8); + writer.WriteStringValue(BlobfuseOptions); + } + writer.WritePropertyName("relativeMountPath"u8); + writer.WriteStringValue(RelativeMountPath); + if (Optional.IsDefined(IdentityReference)) + { + writer.WritePropertyName("identityReference"u8); + writer.WriteObjectValue(IdentityReference, options); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + AzureBlobFileSystemConfiguration IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(AzureBlobFileSystemConfiguration)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeAzureBlobFileSystemConfiguration(document.RootElement, options); + } + + internal static AzureBlobFileSystemConfiguration DeserializeAzureBlobFileSystemConfiguration(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string accountName = default; + string containerName = default; + string accountKey = default; + string sasKey = default; + string blobfuseOptions = default; + string relativeMountPath = default; + BatchNodeIdentityReference identityReference = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("accountName"u8)) + { + accountName = property.Value.GetString(); + continue; + } + if (property.NameEquals("containerName"u8)) + { + containerName = property.Value.GetString(); + continue; + } + if (property.NameEquals("accountKey"u8)) + { + accountKey = property.Value.GetString(); + continue; + } + if (property.NameEquals("sasKey"u8)) + { + sasKey = property.Value.GetString(); + continue; + } + if (property.NameEquals("blobfuseOptions"u8)) + { + blobfuseOptions = property.Value.GetString(); + continue; + } + if (property.NameEquals("relativeMountPath"u8)) + { + relativeMountPath = property.Value.GetString(); + continue; + } + if (property.NameEquals("identityReference"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + identityReference = BatchNodeIdentityReference.DeserializeBatchNodeIdentityReference(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new AzureBlobFileSystemConfiguration( + accountName, + containerName, + accountKey, + sasKey, + blobfuseOptions, + relativeMountPath, + identityReference, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(AzureBlobFileSystemConfiguration)} does not support writing '{options.Format}' format."); + } + } + + AzureBlobFileSystemConfiguration IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeAzureBlobFileSystemConfiguration(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(AzureBlobFileSystemConfiguration)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. 
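// ---- Illustrative annotation, not part of the generated source ----
// A hedged sketch of constructing the AzureBlobFileSystemConfiguration model
// that this file serializes. Per its doc comments, accountKey, sasKey, and
// identityReference are mutually exclusive, so exactly one is set here; the
// account, container, and option values are placeholders.
var blobMount = new AzureBlobFileSystemConfiguration(
    accountName: "mystorageaccount",
    containerName: "input-data",
    relativeMountPath: "data") // mounted under AZ_BATCH_NODE_MOUNTS_DIR
{
    SasKey = "<sas-token>",                 // the sole credential in this sketch
    BlobfuseOptions = "-o attr_timeout=240" // assumption: a valid blobfuse flag
};
// --------------------------------------------------------------------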
+ /// The response to deserialize the model from. + internal static AzureBlobFileSystemConfiguration FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeAzureBlobFileSystemConfiguration(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/AzureBlobFileSystemConfiguration.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/AzureBlobFileSystemConfiguration.cs new file mode 100644 index 0000000000000..bf6bd8b45665c --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/AzureBlobFileSystemConfiguration.cs @@ -0,0 +1,105 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Information used to connect to an Azure Storage Container using Blobfuse. + public partial class AzureBlobFileSystemConfiguration + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The Azure Storage Account name. + /// The Azure Blob Storage Container name. + /// The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. + /// , or is null. + public AzureBlobFileSystemConfiguration(string accountName, string containerName, string relativeMountPath) + { + Argument.AssertNotNull(accountName, nameof(accountName)); + Argument.AssertNotNull(containerName, nameof(containerName)); + Argument.AssertNotNull(relativeMountPath, nameof(relativeMountPath)); + + AccountName = accountName; + ContainerName = containerName; + RelativeMountPath = relativeMountPath; + } + + /// Initializes a new instance of . + /// The Azure Storage Account name. + /// The Azure Blob Storage Container name. + /// The Azure Storage Account key. This property is mutually exclusive with both sasKey and identity; exactly one must be specified. + /// The Azure Storage SAS token. This property is mutually exclusive with both accountKey and identity; exactly one must be specified. + /// Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux. + /// The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. 
+ /// The reference to the user assigned identity to use to access containerName. This property is mutually exclusive with both accountKey and sasKey; exactly one must be specified. + /// Keeps track of any properties unknown to the library. + internal AzureBlobFileSystemConfiguration(string accountName, string containerName, string accountKey, string sasKey, string blobfuseOptions, string relativeMountPath, BatchNodeIdentityReference identityReference, IDictionary serializedAdditionalRawData) + { + AccountName = accountName; + ContainerName = containerName; + AccountKey = accountKey; + SasKey = sasKey; + BlobfuseOptions = blobfuseOptions; + RelativeMountPath = relativeMountPath; + IdentityReference = identityReference; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal AzureBlobFileSystemConfiguration() + { + } + + /// The Azure Storage Account name. + public string AccountName { get; set; } + /// The Azure Blob Storage Container name. + public string ContainerName { get; set; } + /// The Azure Storage Account key. This property is mutually exclusive with both sasKey and identity; exactly one must be specified. + public string AccountKey { get; set; } + /// The Azure Storage SAS token. This property is mutually exclusive with both accountKey and identity; exactly one must be specified. + public string SasKey { get; set; } + /// Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux. + public string BlobfuseOptions { get; set; } + /// The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. + public string RelativeMountPath { get; set; } + /// The reference to the user assigned identity to use to access containerName. This property is mutually exclusive with both accountKey and sasKey; exactly one must be specified. + public BatchNodeIdentityReference IdentityReference { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/AzureFileShareConfiguration.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/AzureFileShareConfiguration.Serialization.cs new file mode 100644 index 0000000000000..62c45e62098fb --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/AzureFileShareConfiguration.Serialization.cs @@ -0,0 +1,176 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class AzureFileShareConfiguration : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(AzureFileShareConfiguration)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("accountName"u8); + writer.WriteStringValue(AccountName); + writer.WritePropertyName("azureFileUrl"u8); + writer.WriteStringValue(AzureFileUrl); + writer.WritePropertyName("accountKey"u8); + writer.WriteStringValue(AccountKey); + writer.WritePropertyName("relativeMountPath"u8); + writer.WriteStringValue(RelativeMountPath); + if (Optional.IsDefined(MountOptions)) + { + writer.WritePropertyName("mountOptions"u8); + writer.WriteStringValue(MountOptions); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + AzureFileShareConfiguration IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(AzureFileShareConfiguration)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeAzureFileShareConfiguration(document.RootElement, options); + } + + internal static AzureFileShareConfiguration DeserializeAzureFileShareConfiguration(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string accountName = default; + string azureFileUrl = default; + string accountKey = default; + string relativeMountPath = default; + string mountOptions = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("accountName"u8)) + { + accountName = property.Value.GetString(); + continue; + } + if (property.NameEquals("azureFileUrl"u8)) + { + azureFileUrl = property.Value.GetString(); + continue; + } + if (property.NameEquals("accountKey"u8)) + { + accountKey = property.Value.GetString(); + continue; + } + if (property.NameEquals("relativeMountPath"u8)) + { + relativeMountPath = property.Value.GetString(); + continue; + } + if (property.NameEquals("mountOptions"u8)) + { + mountOptions = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new AzureFileShareConfiguration( + accountName, + azureFileUrl, + accountKey, + relativeMountPath, + mountOptions, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(AzureFileShareConfiguration)} does not support writing '{options.Format}' format."); + } + } + + AzureFileShareConfiguration IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeAzureFileShareConfiguration(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(AzureFileShareConfiguration)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static AzureFileShareConfiguration FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeAzureFileShareConfiguration(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/AzureFileShareConfiguration.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/AzureFileShareConfiguration.cs new file mode 100644 index 0000000000000..76049a45e4288 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/AzureFileShareConfiguration.cs @@ -0,0 +1,100 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Information used to connect to an Azure Fileshare. + public partial class AzureFileShareConfiguration + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The Azure Storage account name. + /// The Azure Files URL. This is of the form 'https://{account}.file.core.windows.net/'. + /// The Azure Storage account key. + /// The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. + /// , , or is null. 
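// ---- Illustrative annotation, not part of the generated source ----
// A sketch of the public constructor that follows; all four arguments are
// required (each is null-checked), and every value shown is a placeholder.
var fileShareMount = new AzureFileShareConfiguration(
    accountName: "mystorageaccount",
    azureFileUrl: "https://mystorageaccount.file.core.windows.net/myshare",
    accountKey: "<storage-account-key>",
    relativeMountPath: "share")
{
    MountOptions = "-o vers=3.0" // 'mount' options on Linux, 'net use' options on Windows
};
// --------------------------------------------------------------------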
+ public AzureFileShareConfiguration(string accountName, string azureFileUrl, string accountKey, string relativeMountPath) + { + Argument.AssertNotNull(accountName, nameof(accountName)); + Argument.AssertNotNull(azureFileUrl, nameof(azureFileUrl)); + Argument.AssertNotNull(accountKey, nameof(accountKey)); + Argument.AssertNotNull(relativeMountPath, nameof(relativeMountPath)); + + AccountName = accountName; + AzureFileUrl = azureFileUrl; + AccountKey = accountKey; + RelativeMountPath = relativeMountPath; + } + + /// Initializes a new instance of . + /// The Azure Storage account name. + /// The Azure Files URL. This is of the form 'https://{account}.file.core.windows.net/'. + /// The Azure Storage account key. + /// The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. + /// Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux. + /// Keeps track of any properties unknown to the library. + internal AzureFileShareConfiguration(string accountName, string azureFileUrl, string accountKey, string relativeMountPath, string mountOptions, IDictionary serializedAdditionalRawData) + { + AccountName = accountName; + AzureFileUrl = azureFileUrl; + AccountKey = accountKey; + RelativeMountPath = relativeMountPath; + MountOptions = mountOptions; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal AzureFileShareConfiguration() + { + } + + /// The Azure Storage account name. + public string AccountName { get; set; } + /// The Azure Files URL. This is of the form 'https://{account}.file.core.windows.net/'. + public string AzureFileUrl { get; set; } + /// The Azure Storage account key. + public string AccountKey { get; set; } + /// The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. + public string RelativeMountPath { get; set; } + /// Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux. + public string MountOptions { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchApplication.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchApplication.Serialization.cs new file mode 100644 index 0000000000000..ecc68700eb6c8 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchApplication.Serialization.cs @@ -0,0 +1,161 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchApplication : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchApplication)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + writer.WritePropertyName("displayName"u8); + writer.WriteStringValue(DisplayName); + writer.WritePropertyName("versions"u8); + writer.WriteStartArray(); + foreach (var item in Versions) + { + writer.WriteStringValue(item); + } + writer.WriteEndArray(); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchApplication IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchApplication)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchApplication(document.RootElement, options); + } + + internal static BatchApplication DeserializeBatchApplication(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + string displayName = default; + IReadOnlyList versions = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("displayName"u8)) + { + displayName = property.Value.GetString(); + continue; + } + if (property.NameEquals("versions"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(item.GetString()); + } + versions = array; + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchApplication(id, displayName, versions, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchApplication)} does not support writing '{options.Format}' format."); + } + } + + BatchApplication IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchApplication(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchApplication)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchApplication FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchApplication(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchApplication.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchApplication.cs new file mode 100644 index 0000000000000..32618e1d98915 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchApplication.cs @@ -0,0 +1,90 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; +using System.Linq; + +namespace Azure.Compute.Batch +{ + /// Contains information about an application in an Azure Batch Account. + public partial class BatchApplication + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// A string that uniquely identifies the application within the Account. + /// The display name for the application. + /// The list of available versions of the application. + /// , or is null. + internal BatchApplication(string id, string displayName, IEnumerable versions) + { + Argument.AssertNotNull(id, nameof(id)); + Argument.AssertNotNull(displayName, nameof(displayName)); + Argument.AssertNotNull(versions, nameof(versions)); + + Id = id; + DisplayName = displayName; + Versions = versions.ToList(); + } + + /// Initializes a new instance of . + /// A string that uniquely identifies the application within the Account. + /// The display name for the application. + /// The list of available versions of the application. + /// Keeps track of any properties unknown to the library. + internal BatchApplication(string id, string displayName, IReadOnlyList versions, IDictionary serializedAdditionalRawData) + { + Id = id; + DisplayName = displayName; + Versions = versions; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. 
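// ---- Illustrative annotation, not part of the generated source ----
// BatchApplication is read-only (both constructors are internal), so instances
// normally arrive in service responses. As a sketch, the IJsonModel plumbing in
// the companion serialization file also lets one be rehydrated from JSON via
// System.ClientModel.Primitives.ModelReaderWriter; the payload below is
// hypothetical but matches this file's deserializer ('id', 'displayName',
// 'versions').
BinaryData json = BinaryData.FromString(
    "{\"id\":\"myapp\",\"displayName\":\"My App\",\"versions\":[\"1.0\",\"1.1\"]}");
BatchApplication app = ModelReaderWriter.Read<BatchApplication>(json);
foreach (string version in app.Versions)
{
    Console.WriteLine($"{app.Id} supports version {version}");
}
// --------------------------------------------------------------------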
+ internal BatchApplication() + { + } + + /// A string that uniquely identifies the application within the Account. + public string Id { get; } + /// The display name for the application. + public string DisplayName { get; } + /// The list of available versions of the application. + public IReadOnlyList Versions { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchApplicationPackageReference.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchApplicationPackageReference.Serialization.cs new file mode 100644 index 0000000000000..3aae3911ba245 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchApplicationPackageReference.Serialization.cs @@ -0,0 +1,146 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchApplicationPackageReference : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchApplicationPackageReference)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("applicationId"u8); + writer.WriteStringValue(ApplicationId); + if (Optional.IsDefined(Version)) + { + writer.WritePropertyName("version"u8); + writer.WriteStringValue(Version); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchApplicationPackageReference IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchApplicationPackageReference)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchApplicationPackageReference(document.RootElement, options); + } + + internal static BatchApplicationPackageReference DeserializeBatchApplicationPackageReference(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string applicationId = default; + string version = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("applicationId"u8)) + { + applicationId = property.Value.GetString(); + continue; + } + if (property.NameEquals("version"u8)) + { + version = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchApplicationPackageReference(applicationId, version, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchApplicationPackageReference)} does not support writing '{options.Format}' format."); + } + } + + BatchApplicationPackageReference IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchApplicationPackageReference(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchApplicationPackageReference)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchApplicationPackageReference FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchApplicationPackageReference(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchApplicationPackageReference.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchApplicationPackageReference.cs new file mode 100644 index 0000000000000..f7d288b197406 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchApplicationPackageReference.cs @@ -0,0 +1,79 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
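// ---- Illustrative annotation, not part of the generated source ----
// A sketch of the package reference defined below. Per this file's doc
// comments, pool creation requires the fully qualified application resource
// ID; the subscription, resource group, and account segments are placeholders.
var packageReference = new BatchApplicationPackageReference(
    "/subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/" +
    "Microsoft.Batch/batchAccounts/<account-name>/applications/myapp")
{
    Version = "1.0" // if omitted and no default version is defined, pool creation fails
};
// --------------------------------------------------------------------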
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.Collections.Generic;
+
+namespace Azure.Compute.Batch
+{
+    /// <summary> A reference to a Package to be deployed to Compute Nodes. </summary>
+    public partial class BatchApplicationPackageReference
+    {
+        /// <summary>
+        /// Keeps track of any properties unknown to the library.
+        /// <para>
+        /// To assign an object to the value of this property use <see cref="BinaryData.FromObjectAsJson{T}(T, System.Text.Json.JsonSerializerOptions?)"/>.
+        /// </para>
+        /// <para>
+        /// To assign an already formatted json string to this property use <see cref="BinaryData.FromString(string)"/>.
+        /// </para>
+        /// <para>
+        /// Examples:
+        /// <list type="bullet">
+        /// <item>
+        /// <term>BinaryData.FromObjectAsJson("foo")</term>
+        /// <description>Creates a payload of "foo".</description>
+        /// </item>
+        /// <item>
+        /// <term>BinaryData.FromString("\"foo\"")</term>
+        /// <description>Creates a payload of "foo".</description>
+        /// </item>
+        /// <item>
+        /// <term>BinaryData.FromObjectAsJson(new { key = "value" })</term>
+        /// <description>Creates a payload of { "key": "value" }.</description>
+        /// </item>
+        /// <item>
+        /// <term>BinaryData.FromString("{\"key\": \"value\"}")</term>
+        /// <description>Creates a payload of { "key": "value" }.</description>
+        /// </item>
+        /// </list>
+        /// </para>
+        /// </summary>
+        private IDictionary<string, BinaryData> _serializedAdditionalRawData;
+
+        /// <summary> Initializes a new instance of <see cref="BatchApplicationPackageReference"/>. </summary>
+        /// <param name="applicationId"> The ID of the application to deploy. When creating a pool, the package's application ID must be fully qualified (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). </param>
+        /// <exception cref="ArgumentNullException"> <paramref name="applicationId"/> is null. </exception>
+        public BatchApplicationPackageReference(string applicationId)
+        {
+            Argument.AssertNotNull(applicationId, nameof(applicationId));
+
+            ApplicationId = applicationId;
+        }
+
+        /// <summary> Initializes a new instance of <see cref="BatchApplicationPackageReference"/>. </summary>
+        /// <param name="applicationId"> The ID of the application to deploy. When creating a pool, the package's application ID must be fully qualified (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). </param>
+        /// <param name="version"> The version of the application to deploy. If omitted, the default version is deployed. If this is omitted on a Pool, and no default version is specified for this application, the request fails with the error code InvalidApplicationPackageReferences and HTTP status code 409. If this is omitted on a Task, and no default version is specified for this application, the Task fails with a pre-processing error. </param>
+        /// <param name="serializedAdditionalRawData"> Keeps track of any properties unknown to the library. </param>
+        internal BatchApplicationPackageReference(string applicationId, string version, IDictionary<string, BinaryData> serializedAdditionalRawData)
+        {
+            ApplicationId = applicationId;
+            Version = version;
+            _serializedAdditionalRawData = serializedAdditionalRawData;
+        }
+
+        /// <summary> Initializes a new instance of <see cref="BatchApplicationPackageReference"/> for deserialization. </summary>
+        internal BatchApplicationPackageReference()
+        {
+        }
+
+        /// <summary> The ID of the application to deploy. When creating a pool, the package's application ID must be fully qualified (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). </summary>
+        public string ApplicationId { get; set; }
+        /// <summary> The version of the application to deploy. If omitted, the default version is deployed. If this is omitted on a Pool, and no default version is specified for this application, the request fails with the error code InvalidApplicationPackageReferences and HTTP status code 409. If this is omitted on a Task, and no default version is specified for this application, the Task fails with a pre-processing error. </summary>
+ public string Version { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchAutoPoolSpecification.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchAutoPoolSpecification.Serialization.cs new file mode 100644 index 0000000000000..cde73fe116664 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchAutoPoolSpecification.Serialization.cs @@ -0,0 +1,176 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchAutoPoolSpecification : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchAutoPoolSpecification)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(AutoPoolIdPrefix)) + { + writer.WritePropertyName("autoPoolIdPrefix"u8); + writer.WriteStringValue(AutoPoolIdPrefix); + } + writer.WritePropertyName("poolLifetimeOption"u8); + writer.WriteStringValue(PoolLifetimeOption.ToString()); + if (Optional.IsDefined(KeepAlive)) + { + writer.WritePropertyName("keepAlive"u8); + writer.WriteBooleanValue(KeepAlive.Value); + } + if (Optional.IsDefined(Pool)) + { + writer.WritePropertyName("pool"u8); + writer.WriteObjectValue(Pool, options); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchAutoPoolSpecification IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchAutoPoolSpecification)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchAutoPoolSpecification(document.RootElement, options); + } + + internal static BatchAutoPoolSpecification DeserializeBatchAutoPoolSpecification(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string autoPoolIdPrefix = default; + BatchPoolLifetimeOption poolLifetimeOption = default; + bool? 
keepAlive = default; + BatchPoolSpecification pool = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("autoPoolIdPrefix"u8)) + { + autoPoolIdPrefix = property.Value.GetString(); + continue; + } + if (property.NameEquals("poolLifetimeOption"u8)) + { + poolLifetimeOption = new BatchPoolLifetimeOption(property.Value.GetString()); + continue; + } + if (property.NameEquals("keepAlive"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + keepAlive = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("pool"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + pool = BatchPoolSpecification.DeserializeBatchPoolSpecification(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchAutoPoolSpecification(autoPoolIdPrefix, poolLifetimeOption, keepAlive, pool, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchAutoPoolSpecification)} does not support writing '{options.Format}' format."); + } + } + + BatchAutoPoolSpecification IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchAutoPoolSpecification(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchAutoPoolSpecification)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchAutoPoolSpecification FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchAutoPoolSpecification(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchAutoPoolSpecification.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchAutoPoolSpecification.cs new file mode 100644 index 0000000000000..3b9554463013c --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchAutoPoolSpecification.cs @@ -0,0 +1,87 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// + /// Specifies characteristics for a temporary 'auto pool'. The Batch service will + /// create this auto Pool when the Job is submitted. 
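The `_serializedAdditionalRawData` plumbing in the deserializer above retains JSON properties the library does not model. A small sketch of that behavior under the default "J" format (the `futureSetting` property and the `job` lifetime value are illustrative assumptions, not part of this PR):

```C#
using System;
using System.ClientModel.Primitives;
using Azure.Compute.Batch;

// JSON carrying a property the generated model does not know about.
BinaryData wire = BinaryData.FromString(
    "{\"poolLifetimeOption\":\"job\",\"keepAlive\":true,\"futureSetting\":42}");

BatchAutoPoolSpecification spec =
    ModelReaderWriter.Read<BatchAutoPoolSpecification>(wire);

// Under the "J" format the unknown property is captured on read and
// emitted again on write, so it survives the round trip.
Console.WriteLine(
    ModelReaderWriter.Write(spec).ToString().Contains("futureSetting")); // True
```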
+ /// + public partial class BatchAutoPoolSpecification + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The minimum lifetime of created auto Pools, and how multiple Jobs on a schedule are assigned to Pools. + public BatchAutoPoolSpecification(BatchPoolLifetimeOption poolLifetimeOption) + { + PoolLifetimeOption = poolLifetimeOption; + } + + /// Initializes a new instance of . + /// A prefix to be added to the unique identifier when a Pool is automatically created. The Batch service assigns each auto Pool a unique identifier on creation. To distinguish between Pools created for different purposes, you can specify this element to add a prefix to the ID that is assigned. The prefix can be up to 20 characters long. + /// The minimum lifetime of created auto Pools, and how multiple Jobs on a schedule are assigned to Pools. + /// Whether to keep an auto Pool alive after its lifetime expires. If false, the Batch service deletes the Pool once its lifetime (as determined by the poolLifetimeOption setting) expires; that is, when the Job or Job Schedule completes. If true, the Batch service does not delete the Pool automatically. It is up to the user to delete auto Pools created with this option. + /// The Pool specification for the auto Pool. + /// Keeps track of any properties unknown to the library. + internal BatchAutoPoolSpecification(string autoPoolIdPrefix, BatchPoolLifetimeOption poolLifetimeOption, bool? keepAlive, BatchPoolSpecification pool, IDictionary serializedAdditionalRawData) + { + AutoPoolIdPrefix = autoPoolIdPrefix; + PoolLifetimeOption = poolLifetimeOption; + KeepAlive = keepAlive; + Pool = pool; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchAutoPoolSpecification() + { + } + + /// A prefix to be added to the unique identifier when a Pool is automatically created. The Batch service assigns each auto Pool a unique identifier on creation. To distinguish between Pools created for different purposes, you can specify this element to add a prefix to the ID that is assigned. The prefix can be up to 20 characters long. + public string AutoPoolIdPrefix { get; set; } + /// The minimum lifetime of created auto Pools, and how multiple Jobs on a schedule are assigned to Pools. + public BatchPoolLifetimeOption PoolLifetimeOption { get; set; } + /// Whether to keep an auto Pool alive after its lifetime expires. If false, the Batch service deletes the Pool once its lifetime (as determined by the poolLifetimeOption setting) expires; that is, when the Job or Job Schedule completes. If true, the Batch service does not delete the Pool automatically. It is up to the user to delete auto Pools created with this option. + public bool? 
KeepAlive { get; set; } + /// The Pool specification for the auto Pool. + public BatchPoolSpecification Pool { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchClient.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchClient.cs new file mode 100644 index 0000000000000..e9ae40ae68a88 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchClient.cs @@ -0,0 +1,11701 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; +using Autorest.CSharp.Core; +using Azure.Core; +using Azure.Core.Pipeline; + +namespace Azure.Compute.Batch +{ + // Data plane generated client. + /// The Batch service client. + public partial class BatchClient + { + private static readonly string[] AuthorizationScopes = new string[] { "https://batch.core.windows.net//.default" }; + private readonly TokenCredential _tokenCredential; + private readonly HttpPipeline _pipeline; + private readonly Uri _endpoint; + private readonly string _apiVersion; + + /// The ClientDiagnostics is used to provide tracing support for the client library. + internal ClientDiagnostics ClientDiagnostics { get; } + + /// The HTTP pipeline for sending and receiving REST requests and responses. + public virtual HttpPipeline Pipeline => _pipeline; + + /// Initializes a new instance of BatchClient for mocking. + protected BatchClient() + { + } + + /// Initializes a new instance of BatchClient. + /// Batch account endpoint (for example: https://batchaccount.eastus2.batch.azure.com). + /// A credential used to authenticate to an Azure Service. + /// or is null. + public BatchClient(Uri endpoint, TokenCredential credential) : this(endpoint, credential, new BatchClientOptions()) + { + } + + /// Initializes a new instance of BatchClient. + /// Batch account endpoint (for example: https://batchaccount.eastus2.batch.azure.com). + /// A credential used to authenticate to an Azure Service. + /// The options for configuring the client. + /// or is null. + public BatchClient(Uri endpoint, TokenCredential credential, BatchClientOptions options) + { + Argument.AssertNotNull(endpoint, nameof(endpoint)); + Argument.AssertNotNull(credential, nameof(credential)); + options ??= new BatchClientOptions(); + + ClientDiagnostics = new ClientDiagnostics(options, true); + _tokenCredential = credential; + _pipeline = HttpPipelineBuilder.Build(options, Array.Empty(), new HttpPipelinePolicy[] { new BearerTokenAuthenticationPolicy(_tokenCredential, AuthorizationScopes) }, new ResponseClassifier()); + _endpoint = endpoint; + _apiVersion = options.Version; + } + + /// Gets information about the specified Application. + /// The ID of the Application. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// + /// This operation returns only Applications and versions that are available for + /// use on Compute Nodes; that is, that can be used in an Package reference. 
For + /// administrator information about Applications and versions that are not yet + /// available to Compute Nodes, use the Azure portal or the Azure Resource Manager + /// API. + /// + /// + public virtual async Task> GetApplicationAsync(string applicationId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(applicationId, nameof(applicationId)); + + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await GetApplicationAsync(applicationId, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + return Response.FromValue(BatchApplication.FromResponse(response), response); + } + + /// Gets information about the specified Application. + /// The ID of the Application. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// + /// This operation returns only Applications and versions that are available for + /// use on Compute Nodes; that is, that can be used in an Package reference. For + /// administrator information about Applications and versions that are not yet + /// available to Compute Nodes, use the Azure portal or the Azure Resource Manager + /// API. + /// + /// + public virtual Response GetApplication(string applicationId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(applicationId, nameof(applicationId)); + + RequestContext context = FromCancellationToken(cancellationToken); + Response response = GetApplication(applicationId, timeOutInSeconds, ocpdate, context); + return Response.FromValue(BatchApplication.FromResponse(response), response); + } + + /// + /// [Protocol Method] Gets information about the specified Application. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Application. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task GetApplicationAsync(string applicationId, int? timeOutInSeconds, DateTimeOffset? 
ocpdate, RequestContext context) + { + Argument.AssertNotNullOrEmpty(applicationId, nameof(applicationId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetApplication"); + scope.Start(); + try + { + using HttpMessage message = CreateGetApplicationRequest(applicationId, timeOutInSeconds, ocpdate, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Gets information about the specified Application. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Application. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response GetApplication(string applicationId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + { + Argument.AssertNotNullOrEmpty(applicationId, nameof(applicationId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetApplication"); + scope.Start(); + try + { + using HttpMessage message = CreateGetApplicationRequest(applicationId, timeOutInSeconds, ocpdate, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Creates a Pool to the specified Account. + /// The Pool to be created. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// is null. + /// + /// When naming Pools, avoid including sensitive information such as user names or + /// secret project names. This information may appear in telemetry logs accessible + /// to Microsoft Support engineers. + /// + /// + public virtual async Task CreatePoolAsync(BatchPoolCreateContent pool, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNull(pool, nameof(pool)); + + using RequestContent content = pool.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await CreatePoolAsync(content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + return response; + } + + /// Creates a Pool to the specified Account. + /// The Pool to be created. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// is null. + /// + /// When naming Pools, avoid including sensitive information such as user names or + /// secret project names. This information may appear in telemetry logs accessible + /// to Microsoft Support engineers. + /// + /// + public virtual Response CreatePool(BatchPoolCreateContent pool, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNull(pool, nameof(pool)); + + using RequestContent content = pool.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = CreatePool(content, timeOutInSeconds, ocpdate, context); + return response; + } + + /// + /// [Protocol Method] Creates a Pool to the specified Account. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task CreatePoolAsync(RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.CreatePool"); + scope.Start(); + try + { + using HttpMessage message = CreateCreatePoolRequest(content, timeOutInSeconds, ocpdate, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Creates a Pool to the specified Account. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. 
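To exercise the convenience overloads above, construct a `BatchClient` with a token credential and call the typed methods; `Response<T>` converts implicitly to its value. A minimal sketch (endpoint, IDs, and VM size are placeholders, and a production pool create needs more configuration than shown):

```C#
using System;
using Azure.Compute.Batch;
using Azure.Identity;

var client = new BatchClient(
    new Uri("https://batchaccount.eastus2.batch.azure.com"),
    new DefaultAzureCredential());

// Convenience overload: the response body is deserialized for you.
BatchApplication app = await client.GetApplicationAsync("my-app-id");
Console.WriteLine(app.DisplayName);

// Minimal pool create; BatchPoolCreateContent's constructor shape is
// assumed from the generated models in this PR.
var pool = new BatchPoolCreateContent("demo-pool", "STANDARD_D1_v2");
await client.CreatePoolAsync(pool);
```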
+ /// The response returned from the service. + /// + public virtual Response CreatePool(RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.CreatePool"); + scope.Start(); + try + { + using HttpMessage message = CreateCreatePoolRequest(content, timeOutInSeconds, ocpdate, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Deletes a Pool from the specified Account. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Pool to get. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task DeletePoolAsync(string poolId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeletePool"); + scope.Start(); + try + { + using HttpMessage message = CreateDeletePoolRequest(poolId, timeOutInSeconds, ocpdate, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Deletes a Pool from the specified Account. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Pool to get. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response DeletePool(string poolId, int? 
timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeletePool"); + scope.Start(); + try + { + using HttpMessage message = CreateDeletePoolRequest(poolId, timeOutInSeconds, ocpdate, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Gets information about the specified Pool. + /// The ID of the Pool to get. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// An OData $select clause. + /// An OData $expand clause. + /// The content to send as the request conditions of the request. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// + public virtual async Task> GetPoolAsync(string poolId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable select = null, IEnumerable expand = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await GetPoolAsync(poolId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context).ConfigureAwait(false); + return Response.FromValue(BatchPool.FromResponse(response), response); + } + + /// Gets information about the specified Pool. + /// The ID of the Pool to get. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// An OData $select clause. + /// An OData $expand clause. + /// The content to send as the request conditions of the request. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// + public virtual Response GetPool(string poolId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable select = null, IEnumerable expand = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + + RequestContext context = FromCancellationToken(cancellationToken); + Response response = GetPool(poolId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); + return Response.FromValue(BatchPool.FromResponse(response), response); + } + + /// + /// [Protocol Method] Gets information about the specified Pool. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool to get. 
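`DeletePool` is exposed only as a protocol method, so it returns the raw `Response`. Continuing the sketch above:

```C#
using Azure;

// `client` is the BatchClient constructed earlier. Pool deletion runs
// asynchronously service-side; the call returns once the request is accepted.
Response response = await client.DeletePoolAsync("demo-pool");
Console.WriteLine(response.Status); // typically 202 (Accepted)
```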
+ /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// An OData $select clause. + /// An OData $expand clause. + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task GetPoolAsync(string poolId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetPool"); + scope.Start(); + try + { + using HttpMessage message = CreateGetPoolRequest(poolId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Gets information about the specified Pool. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool to get. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// An OData $select clause. + /// An OData $expand clause. + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response GetPool(string poolId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetPool"); + scope.Start(); + try + { + using HttpMessage message = CreateGetPoolRequest(poolId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Updates the properties of the specified Pool. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Pool to get. + /// The content to send as the body of the request. 
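`GetPool` accepts OData `$select`/`$expand` clauses to trim the returned entity. A sketch, with `BatchPool` property names assumed from the Batch REST schema:

```C#
using System;
using Azure.Compute.Batch;

// `client` is the BatchClient constructed earlier.
BatchPool pool = await client.GetPoolAsync(
    "demo-pool",
    select: new[] { "id", "state", "allocationState" });
Console.WriteLine($"{pool.Id}: {pool.AllocationState}");
```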
+ /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task UpdatePoolAsync(string poolId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.UpdatePool"); + scope.Start(); + try + { + using HttpMessage message = CreateUpdatePoolRequest(poolId, content, timeOutInSeconds, ocpdate, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Updates the properties of the specified Pool. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Pool to get. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response UpdatePool(string poolId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.UpdatePool"); + scope.Start(); + try + { + using HttpMessage message = CreateUpdatePoolRequest(poolId, content, timeOutInSeconds, ocpdate, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Disables automatic scaling for a Pool. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. 
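`UpdatePool` has no convenience overload, so the patch body is built by hand as `RequestContent`. A sketch, assuming the wire property names from the Batch REST API:

```C#
using Azure.Core;

// RequestContent.Create serializes an anonymous object as JSON.
// `client` is the BatchClient constructed earlier.
using RequestContent patch = RequestContent.Create(new
{
    metadata = new[] { new { name = "owner", value = "team-a" } }
});
await client.UpdatePoolAsync("demo-pool", patch);
```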
+ /// + /// + /// + /// + /// The ID of the Pool on which to disable automatic scaling. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task DisablePoolAutoScaleAsync(string poolId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.DisablePoolAutoScale"); + scope.Start(); + try + { + using HttpMessage message = CreateDisablePoolAutoScaleRequest(poolId, timeOutInSeconds, ocpdate, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Disables automatic scaling for a Pool. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Pool on which to disable automatic scaling. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response DisablePoolAutoScale(string poolId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.DisablePoolAutoScale"); + scope.Start(); + try + { + using HttpMessage message = CreateDisablePoolAutoScaleRequest(poolId, timeOutInSeconds, ocpdate, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Enables automatic scaling for a Pool. + /// The ID of the Pool to get. + /// The options to use for enabling automatic scaling. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. 
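`DisablePoolAutoScale` takes only the pool ID and, like the other protocol-only methods, returns the raw `Response`. Continuing the sketch:

```C#
using System;
using Azure;

// `client` is the BatchClient constructed earlier.
Response response = await client.DisablePoolAutoScaleAsync("demo-pool");
Console.WriteLine(response.Status); // 200 when the request is accepted
```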
+ /// + /// The content to send as the request conditions of the request. + /// The cancellation token to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// + /// You cannot enable automatic scaling on a Pool if a resize operation is in + /// progress on the Pool. If automatic scaling of the Pool is currently disabled, + /// you must specify a valid autoscale formula as part of the request. If automatic + /// scaling of the Pool is already enabled, you may specify a new autoscale formula + /// and/or a new evaluation interval. You cannot call this API for the same Pool + /// more than once every 30 seconds. + /// + /// + public virtual async Task EnablePoolAutoScaleAsync(string poolId, BatchPoolEnableAutoScaleContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNull(content, nameof(content)); + + using RequestContent content0 = content.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await EnablePoolAutoScaleAsync(poolId, content0, timeOutInSeconds, ocpdate, requestConditions, context).ConfigureAwait(false); + return response; + } + + /// Enables automatic scaling for a Pool. + /// The ID of the Pool to get. + /// The options to use for enabling automatic scaling. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The cancellation token to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// + /// You cannot enable automatic scaling on a Pool if a resize operation is in + /// progress on the Pool. If automatic scaling of the Pool is currently disabled, + /// you must specify a valid autoscale formula as part of the request. If automatic + /// scaling of the Pool is already enabled, you may specify a new autoscale formula + /// and/or a new evaluation interval. You cannot call this API for the same Pool + /// more than once every 30 seconds. + /// + /// + public virtual Response EnablePoolAutoScale(string poolId, BatchPoolEnableAutoScaleContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNull(content, nameof(content)); + + using RequestContent content0 = content.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = EnablePoolAutoScale(poolId, content0, timeOutInSeconds, ocpdate, requestConditions, context); + return response; + } + + /// + /// [Protocol Method] Enables automatic scaling for a Pool. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool to get. 
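The convenience overload above takes a typed `BatchPoolEnableAutoScaleContent`. A sketch, assuming the content type exposes settable formula and interval properties like the other generated models; per the remarks above, the API cannot be called for the same pool more than once every 30 seconds:

```C#
using System;
using Azure.Compute.Batch;

// `client` is the BatchClient constructed earlier.
var enable = new BatchPoolEnableAutoScaleContent
{
    AutoScaleFormula = "$TargetDedicatedNodes = 2;",
    AutoScaleEvaluationInterval = TimeSpan.FromMinutes(6)
};
await client.EnablePoolAutoScaleAsync("demo-pool", enable);
```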
+ /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task EnablePoolAutoScaleAsync(string poolId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.EnablePoolAutoScale"); + scope.Start(); + try + { + using HttpMessage message = CreateEnablePoolAutoScaleRequest(poolId, content, timeOutInSeconds, ocpdate, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Enables automatic scaling for a Pool. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool to get. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response EnablePoolAutoScale(string poolId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.EnablePoolAutoScale"); + scope.Start(); + try + { + using HttpMessage message = CreateEnablePoolAutoScaleRequest(poolId, content, timeOutInSeconds, ocpdate, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Gets the result of evaluating an automatic scaling formula on the Pool. + /// The ID of the Pool on which to evaluate the automatic scaling formula. 
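The `requestConditions` parameter threaded through these methods enables optimistic concurrency. A sketch gating the call on the pool's last-modified timestamp (`BatchPool.LastModified` is assumed from the REST schema):

```C#
using Azure;
using Azure.Compute.Batch;

// `client` is the BatchClient constructed earlier.
BatchPool current = await client.GetPoolAsync("demo-pool");
var conditions = new RequestConditions { IfUnmodifiedSince = current.LastModified };

// Fails with 412 (Precondition Failed) if the pool changed in the meantime.
await client.EnablePoolAutoScaleAsync(
    "demo-pool",
    new BatchPoolEnableAutoScaleContent { AutoScaleFormula = "$TargetDedicatedNodes = 0;" },
    requestConditions: conditions);
```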
+ /// The options to use for evaluating the automatic scaling formula. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// + /// This API is primarily for validating an autoscale formula, as it simply returns + /// the result without applying the formula to the Pool. The Pool must have auto + /// scaling enabled in order to evaluate a formula. + /// + /// + public virtual async Task> EvaluatePoolAutoScaleAsync(string poolId, BatchPoolEvaluateAutoScaleContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNull(content, nameof(content)); + + using RequestContent content0 = content.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await EvaluatePoolAutoScaleAsync(poolId, content0, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + return Response.FromValue(AutoScaleRun.FromResponse(response), response); + } + + /// Gets the result of evaluating an automatic scaling formula on the Pool. + /// The ID of the Pool on which to evaluate the automatic scaling formula. + /// The options to use for evaluating the automatic scaling formula. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// + /// This API is primarily for validating an autoscale formula, as it simply returns + /// the result without applying the formula to the Pool. The Pool must have auto + /// scaling enabled in order to evaluate a formula. + /// + /// + public virtual Response EvaluatePoolAutoScale(string poolId, BatchPoolEvaluateAutoScaleContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNull(content, nameof(content)); + + using RequestContent content0 = content.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = EvaluatePoolAutoScale(poolId, content0, timeOutInSeconds, ocpdate, context); + return Response.FromValue(AutoScaleRun.FromResponse(response), response); + } + + /// + /// [Protocol Method] Gets the result of evaluating an automatic scaling formula on the Pool. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool on which to evaluate the automatic scaling formula. 
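As the remarks above note, `EvaluatePoolAutoScale` validates a formula without applying it, and the pool must already have autoscale enabled. A sketch; the constructor shape and the `Results` property are assumed from the generated models:

```C#
using System;
using Azure.Compute.Batch;

// `client` is the BatchClient constructed earlier.
AutoScaleRun run = await client.EvaluatePoolAutoScaleAsync(
    "demo-pool",
    new BatchPoolEvaluateAutoScaleContent("$TargetDedicatedNodes = 5;"));
Console.WriteLine(run.Results);
```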
+ /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task EvaluatePoolAutoScaleAsync(string poolId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.EvaluatePoolAutoScale"); + scope.Start(); + try + { + using HttpMessage message = CreateEvaluatePoolAutoScaleRequest(poolId, content, timeOutInSeconds, ocpdate, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Gets the result of evaluating an automatic scaling formula on the Pool. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool on which to evaluate the automatic scaling formula. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response EvaluatePoolAutoScale(string poolId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.EvaluatePoolAutoScale"); + scope.Start(); + try + { + using HttpMessage message = CreateEvaluatePoolAutoScaleRequest(poolId, content, timeOutInSeconds, ocpdate, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Changes the number of Compute Nodes that are assigned to a Pool. + /// The ID of the Pool to get. + /// The options to use for resizing the pool. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". 
+ /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The cancellation token to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// + /// You can only resize a Pool when its allocation state is steady. If the Pool is + /// already resizing, the request fails with status code 409. When you resize a + /// Pool, the Pool's allocation state changes from steady to resizing. You cannot + /// resize Pools which are configured for automatic scaling. If you try to do this, + /// the Batch service returns an error 409. If you resize a Pool downwards, the + /// Batch service chooses which Compute Nodes to remove. To remove specific Compute + /// Nodes, use the Pool remove Compute Nodes API instead. + /// + /// + public virtual async Task ResizePoolAsync(string poolId, BatchPoolResizeContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNull(content, nameof(content)); + + using RequestContent content0 = content.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await ResizePoolAsync(poolId, content0, timeOutInSeconds, ocpdate, requestConditions, context).ConfigureAwait(false); + return response; + } + + /// Changes the number of Compute Nodes that are assigned to a Pool. + /// The ID of the Pool to get. + /// The options to use for resizing the pool. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The cancellation token to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// + /// You can only resize a Pool when its allocation state is steady. If the Pool is + /// already resizing, the request fails with status code 409. When you resize a + /// Pool, the Pool's allocation state changes from steady to resizing. You cannot + /// resize Pools which are configured for automatic scaling. If you try to do this, + /// the Batch service returns an error 409. If you resize a Pool downwards, the + /// Batch service chooses which Compute Nodes to remove. To remove specific Compute + /// Nodes, use the Pool remove Compute Nodes API instead. + /// + /// + public virtual Response ResizePool(string poolId, BatchPoolResizeContent content, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNull(content, nameof(content)); + + using RequestContent content0 = content.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = ResizePool(poolId, content0, timeOutInSeconds, ocpdate, requestConditions, context); + return response; + } + + /// + /// [Protocol Method] Changes the number of Compute Nodes that are assigned to a Pool. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool to get. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task ResizePoolAsync(string poolId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.ResizePool"); + scope.Start(); + try + { + using HttpMessage message = CreateResizePoolRequest(poolId, content, timeOutInSeconds, ocpdate, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Changes the number of Compute Nodes that are assigned to a Pool. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool to get. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. 
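+ // ----------------------------------------------------------------------
+ // Illustrative usage sketch for the ResizePool convenience overloads: a
+ // minimal call, assuming an existing BatchClient and assuming
+ // BatchPoolResizeContent exposes settable TargetDedicatedNodes and
+ // TargetLowPriorityNodes properties; those property names are
+ // assumptions for illustration only.
+ internal static async Task ShowResizePoolAsync(BatchClient batchClient)
+ {
+     var resize = new BatchPoolResizeContent
+     {
+         TargetDedicatedNodes = 4,   // assumed property name
+         TargetLowPriorityNodes = 0, // assumed property name
+     };
+     // Per the remarks above, the pool must be in the steady allocation
+     // state or the service responds with status code 409.
+     Response response = await batchClient.ResizePoolAsync("examplePool", resize);
+ }
+ // ----------------------------------------------------------------------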
+ /// The response returned from the service.
+ ///
+ public virtual Response ResizePool(string poolId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null)
+ {
+     Argument.AssertNotNullOrEmpty(poolId, nameof(poolId));
+     Argument.AssertNotNull(content, nameof(content));
+
+     using var scope = ClientDiagnostics.CreateScope("BatchClient.ResizePool");
+     scope.Start();
+     try
+     {
+         using HttpMessage message = CreateResizePoolRequest(poolId, content, timeOutInSeconds, ocpdate, requestConditions, context);
+         return _pipeline.ProcessMessage(message, context);
+     }
+     catch (Exception e)
+     {
+         scope.Failed(e);
+         throw;
+     }
+ }
+
+ // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method
+ ///
+ /// [Protocol Method] Stops an ongoing resize operation on the Pool.
+ ///
+ ///
+ ///
+ /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+ ///
+ ///
+ ///
+ ///
+ /// The ID of the Pool whose resizing you want to stop.
+ /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.
+ ///
+ /// The time the request was issued. Client libraries typically set this to the
+ /// current system clock time; set it explicitly if you are calling the REST API
+ /// directly.
+ ///
+ /// The content to send as the request conditions of the request.
+ /// The request context, which can override default behaviors of the client pipeline on a per-call basis.
+ /// is null.
+ /// is an empty string, and was expected to be non-empty.
+ /// Service returned a non-success status code.
+ /// The response returned from the service.
+ ///
+ public virtual async Task StopPoolResizeAsync(string poolId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null)
+ {
+     Argument.AssertNotNullOrEmpty(poolId, nameof(poolId));
+
+     using var scope = ClientDiagnostics.CreateScope("BatchClient.StopPoolResize");
+     scope.Start();
+     try
+     {
+         using HttpMessage message = CreateStopPoolResizeRequest(poolId, timeOutInSeconds, ocpdate, requestConditions, context);
+         return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false);
+     }
+     catch (Exception e)
+     {
+         scope.Failed(e);
+         throw;
+     }
+ }
+
+ // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method
+ ///
+ /// [Protocol Method] Stops an ongoing resize operation on the Pool.
+ ///
+ ///
+ ///
+ /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+ ///
+ ///
+ ///
+ ///
+ /// The ID of the Pool whose resizing you want to stop.
+ /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.
+ ///
+ /// The time the request was issued. Client libraries typically set this to the
+ /// current system clock time; set it explicitly if you are calling the REST API
+ /// directly.
+ ///
+ /// The content to send as the request conditions of the request.
+ /// The request context, which can override default behaviors of the client pipeline on a per-call basis.
+ /// is null.
+ /// is an empty string, and was expected to be non-empty.
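+ // ----------------------------------------------------------------------
+ // Illustrative usage sketch for StopPoolResizeAsync: cancelling an
+ // in-flight resize. Only the pool ID is required; the remaining
+ // parameters keep their defaults. Assumes an existing BatchClient
+ // instance; the pool ID is hypothetical.
+ internal static async Task ShowStopPoolResizeAsync(BatchClient batchClient)
+ {
+     // Returns the pool toward the steady allocation state once the
+     // cancellation completes; nodes already added or removed stay as-is.
+     Response response = await batchClient.StopPoolResizeAsync("examplePool");
+ }
+ // ----------------------------------------------------------------------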
+ /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response StopPoolResize(string poolId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.StopPoolResize"); + scope.Start(); + try + { + using HttpMessage message = CreateStopPoolResizeRequest(poolId, timeOutInSeconds, ocpdate, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Updates the properties of the specified Pool. + /// The ID of the Pool to update. + /// The options to use for replacing properties on the pool. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// + /// This fully replaces all the updatable properties of the Pool. For example, if + /// the Pool has a StartTask associated with it and if StartTask is not specified + /// with this request, then the Batch service will remove the existing StartTask. + /// + /// + public virtual async Task ReplacePoolPropertiesAsync(string poolId, BatchPoolReplaceContent pool, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNull(pool, nameof(pool)); + + using RequestContent content = pool.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await ReplacePoolPropertiesAsync(poolId, content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + return response; + } + + /// Updates the properties of the specified Pool. + /// The ID of the Pool to update. + /// The options to use for replacing properties on the pool. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// + /// This fully replaces all the updatable properties of the Pool. For example, if + /// the Pool has a StartTask associated with it and if StartTask is not specified + /// with this request, then the Batch service will remove the existing StartTask. + /// + /// + public virtual Response ReplacePoolProperties(string poolId, BatchPoolReplaceContent pool, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNull(pool, nameof(pool)); + + using RequestContent content = pool.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = ReplacePoolProperties(poolId, content, timeOutInSeconds, ocpdate, context); + return response; + } + + /// + /// [Protocol Method] Updates the properties of the specified Pool. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool to update. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task ReplacePoolPropertiesAsync(string poolId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.ReplacePoolProperties"); + scope.Start(); + try + { + using HttpMessage message = CreateReplacePoolPropertiesRequest(poolId, content, timeOutInSeconds, ocpdate, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Updates the properties of the specified Pool. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool to update. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response ReplacePoolProperties(string poolId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? 
+ ocpdate = null, RequestContext context = null)
+ {
+     Argument.AssertNotNullOrEmpty(poolId, nameof(poolId));
+     Argument.AssertNotNull(content, nameof(content));
+
+     using var scope = ClientDiagnostics.CreateScope("BatchClient.ReplacePoolProperties");
+     scope.Start();
+     try
+     {
+         using HttpMessage message = CreateReplacePoolPropertiesRequest(poolId, content, timeOutInSeconds, ocpdate, context);
+         return _pipeline.ProcessMessage(message, context);
+     }
+     catch (Exception e)
+     {
+         scope.Failed(e);
+         throw;
+     }
+ }
+
+ /// Removes Compute Nodes from the specified Pool.
+ /// The ID of the Pool from which you want to remove Compute Nodes.
+ /// The options to use for removing the nodes.
+ /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.
+ ///
+ /// The time the request was issued. Client libraries typically set this to the
+ /// current system clock time; set it explicitly if you are calling the REST API
+ /// directly.
+ ///
+ /// The content to send as the request conditions of the request.
+ /// The cancellation token to use.
+ /// or is null.
+ /// is an empty string, and was expected to be non-empty.
+ ///
+ /// This operation can only run when the allocation state of the Pool is steady.
+ /// When this operation runs, the allocation state changes from steady to resizing.
+ /// Each request may remove up to 100 nodes.
+ ///
+ ///
+ public virtual async Task RemoveNodesAsync(string poolId, BatchNodeRemoveContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default)
+ {
+     Argument.AssertNotNullOrEmpty(poolId, nameof(poolId));
+     Argument.AssertNotNull(content, nameof(content));
+
+     using RequestContent content0 = content.ToRequestContent();
+     RequestContext context = FromCancellationToken(cancellationToken);
+     Response response = await RemoveNodesAsync(poolId, content0, timeOutInSeconds, ocpdate, requestConditions, context).ConfigureAwait(false);
+     return response;
+ }
+
+ /// Removes Compute Nodes from the specified Pool.
+ /// The ID of the Pool from which you want to remove Compute Nodes.
+ /// The options to use for removing the nodes.
+ /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.
+ ///
+ /// The time the request was issued. Client libraries typically set this to the
+ /// current system clock time; set it explicitly if you are calling the REST API
+ /// directly.
+ ///
+ /// The content to send as the request conditions of the request.
+ /// The cancellation token to use.
+ /// or is null.
+ /// is an empty string, and was expected to be non-empty.
+ ///
+ /// This operation can only run when the allocation state of the Pool is steady.
+ /// When this operation runs, the allocation state changes from steady to resizing.
+ /// Each request may remove up to 100 nodes.
+ ///
+ ///
+ public virtual Response RemoveNodes(string poolId, BatchNodeRemoveContent content, int? timeOutInSeconds = null, DateTimeOffset?
ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNull(content, nameof(content)); + + using RequestContent content0 = content.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = RemoveNodes(poolId, content0, timeOutInSeconds, ocpdate, requestConditions, context); + return response; + } + + /// + /// [Protocol Method] Removes Compute Nodes from the specified Pool. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool to get. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task RemoveNodesAsync(string poolId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.RemoveNodes"); + scope.Start(); + try + { + using HttpMessage message = CreateRemoveNodesRequest(poolId, content, timeOutInSeconds, ocpdate, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Removes Compute Nodes from the specified Pool. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool to get. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
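+ // ----------------------------------------------------------------------
+ // Illustrative usage sketch for the RemoveNodes convenience overloads: a
+ // minimal call, assuming an existing BatchClient and assuming
+ // BatchNodeRemoveContent is constructed from the list of node IDs to
+ // remove; that constructor shape and the node ID are assumptions for
+ // illustration only.
+ internal static async Task ShowRemoveNodesAsync(BatchClient batchClient)
+ {
+     // Up to 100 nodes may be removed per request, per the remarks above.
+     var content = new BatchNodeRemoveContent(new[] { "tvmps_node1" }); // hypothetical node ID
+     Response response = await batchClient.RemoveNodesAsync("examplePool", content);
+ }
+ // ----------------------------------------------------------------------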
+ /// + public virtual Response RemoveNodes(string poolId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.RemoveNodes"); + scope.Start(); + try + { + using HttpMessage message = CreateRemoveNodesRequest(poolId, content, timeOutInSeconds, ocpdate, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Deletes a Job. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job to delete. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task DeleteJobAsync(string jobId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteJob"); + scope.Start(); + try + { + using HttpMessage message = CreateDeleteJobRequest(jobId, timeOutInSeconds, ocpdate, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Deletes a Job. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job to delete. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
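+ // ----------------------------------------------------------------------
+ // Illustrative usage sketch for DeleteJobAsync: deleting a job, with
+ // request conditions making the delete conditional on an ETag. Assumes
+ // an existing BatchClient; the job ID and ETag value are hypothetical.
+ internal static async Task ShowDeleteJobAsync(BatchClient batchClient)
+ {
+     var conditions = new RequestConditions { IfMatch = new ETag("0x8D4AFA0") }; // hypothetical ETag
+     Response response = await batchClient.DeleteJobAsync("exampleJob", requestConditions: conditions);
+ }
+ // ----------------------------------------------------------------------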
+ /// + public virtual Response DeleteJob(string jobId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteJob"); + scope.Start(); + try + { + using HttpMessage message = CreateDeleteJobRequest(jobId, timeOutInSeconds, ocpdate, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Gets information about the specified Job. + /// The ID of the Job. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// An OData $select clause. + /// An OData $expand clause. + /// The content to send as the request conditions of the request. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// + public virtual async Task> GetJobAsync(string jobId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable select = null, IEnumerable expand = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await GetJobAsync(jobId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context).ConfigureAwait(false); + return Response.FromValue(BatchJob.FromResponse(response), response); + } + + /// Gets information about the specified Job. + /// The ID of the Job. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// An OData $select clause. + /// An OData $expand clause. + /// The content to send as the request conditions of the request. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// + public virtual Response GetJob(string jobId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable select = null, IEnumerable expand = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + RequestContext context = FromCancellationToken(cancellationToken); + Response response = GetJob(jobId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); + return Response.FromValue(BatchJob.FromResponse(response), response); + } + + /// + /// [Protocol Method] Gets information about the specified Job. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job. 
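+ // ----------------------------------------------------------------------
+ // Illustrative usage sketch for the GetJob convenience overloads:
+ // fetching a job and trimming the payload with an OData $select clause.
+ // Assumes an existing BatchClient; the selected property names and the
+ // Id property on BatchJob are assumptions for illustration only.
+ internal static async Task ShowGetJobAsync(BatchClient batchClient)
+ {
+     Response<BatchJob> job = await batchClient.GetJobAsync(
+         "exampleJob",
+         select: new[] { "id", "state" }); // assumed OData property names
+     Console.WriteLine(job.Value.Id);      // assumed BatchJob property
+ }
+ // ----------------------------------------------------------------------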
+ /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// An OData $select clause. + /// An OData $expand clause. + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task GetJobAsync(string jobId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetJob"); + scope.Start(); + try + { + using HttpMessage message = CreateGetJobRequest(jobId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Gets information about the specified Job. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// An OData $select clause. + /// An OData $expand clause. + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response GetJob(string jobId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetJob"); + scope.Start(); + try + { + using HttpMessage message = CreateGetJobRequest(jobId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Updates the properties of the specified Job. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job whose properties you want to update. + /// The content to send as the body of the request. 
+ /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task UpdateJobAsync(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.UpdateJob"); + scope.Start(); + try + { + using HttpMessage message = CreateUpdateJobRequest(jobId, content, timeOutInSeconds, ocpdate, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Updates the properties of the specified Job. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job whose properties you want to update. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response UpdateJob(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.UpdateJob"); + scope.Start(); + try + { + using HttpMessage message = CreateUpdateJobRequest(jobId, content, timeOutInSeconds, ocpdate, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Updates the properties of the specified Job. + /// The ID of the Job whose properties you want to update. + /// A job with updated properties. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". 
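+ // ----------------------------------------------------------------------
+ // Illustrative usage sketch for the UpdateJob protocol methods, which
+ // take raw RequestContent rather than a model. The body is built from an
+ // anonymous object; the "priority" JSON property name is an assumption
+ // based on the REST API's camel-casing, not confirmed by this diff.
+ internal static async Task ShowUpdateJobAsync(BatchClient batchClient)
+ {
+     RequestContent body = RequestContent.Create(new { priority = 100 }); // assumed JSON shape
+     Response response = await batchClient.UpdateJobAsync("exampleJob", body);
+ }
+ // ----------------------------------------------------------------------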
+ /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The cancellation token to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// + /// This fully replaces all the updatable properties of the Job. For example, if + /// the Job has constraints associated with it and if constraints is not specified + /// with this request, then the Batch service will remove the existing constraints. + /// + /// + public virtual async Task ReplaceJobAsync(string jobId, BatchJob job, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNull(job, nameof(job)); + + using RequestContent content = job.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await ReplaceJobAsync(jobId, content, timeOutInSeconds, ocpdate, requestConditions, context).ConfigureAwait(false); + return response; + } + + /// Updates the properties of the specified Job. + /// The ID of the Job whose properties you want to update. + /// A job with updated properties. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The cancellation token to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// + /// This fully replaces all the updatable properties of the Job. For example, if + /// the Job has constraints associated with it and if constraints is not specified + /// with this request, then the Batch service will remove the existing constraints. + /// + /// + public virtual Response ReplaceJob(string jobId, BatchJob job, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNull(job, nameof(job)); + + using RequestContent content = job.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = ReplaceJob(jobId, content, timeOutInSeconds, ocpdate, requestConditions, context); + return response; + } + + /// + /// [Protocol Method] Updates the properties of the specified Job. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job whose properties you want to update. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. 
Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task ReplaceJobAsync(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.ReplaceJob"); + scope.Start(); + try + { + using HttpMessage message = CreateReplaceJobRequest(jobId, content, timeOutInSeconds, ocpdate, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Updates the properties of the specified Job. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job whose properties you want to update. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response ReplaceJob(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.ReplaceJob"); + scope.Start(); + try + { + using HttpMessage message = CreateReplaceJobRequest(jobId, content, timeOutInSeconds, ocpdate, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Disables the specified Job, preventing new Tasks from running. + /// The ID of the Job to disable. + /// The options to use for disabling the Job. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. 
Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The cancellation token to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// + /// The Batch Service immediately moves the Job to the disabling state. Batch then + /// uses the disableTasks parameter to determine what to do with the currently + /// running Tasks of the Job. The Job remains in the disabling state until the + /// disable operation is completed and all Tasks have been dealt with according to + /// the disableTasks option; the Job then moves to the disabled state. No new Tasks + /// are started under the Job until it moves back to active state. If you try to + /// disable a Job that is in any state other than active, disabling, or disabled, + /// the request fails with status code 409. + /// + /// + public virtual async Task DisableJobAsync(string jobId, BatchJobDisableContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNull(content, nameof(content)); + + using RequestContent content0 = content.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await DisableJobAsync(jobId, content0, timeOutInSeconds, ocpdate, requestConditions, context).ConfigureAwait(false); + return response; + } + + /// Disables the specified Job, preventing new Tasks from running. + /// The ID of the Job to disable. + /// The options to use for disabling the Job. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The cancellation token to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// + /// The Batch Service immediately moves the Job to the disabling state. Batch then + /// uses the disableTasks parameter to determine what to do with the currently + /// running Tasks of the Job. The Job remains in the disabling state until the + /// disable operation is completed and all Tasks have been dealt with according to + /// the disableTasks option; the Job then moves to the disabled state. No new Tasks + /// are started under the Job until it moves back to active state. If you try to + /// disable a Job that is in any state other than active, disabling, or disabled, + /// the request fails with status code 409. + /// + /// + public virtual Response DisableJob(string jobId, BatchJobDisableContent content, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNull(content, nameof(content)); + + using RequestContent content0 = content.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = DisableJob(jobId, content0, timeOutInSeconds, ocpdate, requestConditions, context); + return response; + } + + /// + /// [Protocol Method] Disables the specified Job, preventing new Tasks from running. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job to disable. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task DisableJobAsync(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.DisableJob"); + scope.Start(); + try + { + using HttpMessage message = CreateDisableJobRequest(jobId, content, timeOutInSeconds, ocpdate, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Disables the specified Job, preventing new Tasks from running. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job to disable. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. 
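+ // ----------------------------------------------------------------------
+ // Illustrative usage sketch for the DisableJob convenience overloads: a
+ // minimal call, assuming an existing BatchClient and assuming
+ // BatchJobDisableContent is constructed from the disableTasks option
+ // (here, requeueing running tasks); the constructor and enum names are
+ // assumptions for illustration only.
+ internal static async Task ShowDisableJobAsync(BatchClient batchClient)
+ {
+     var content = new BatchJobDisableContent(DisableBatchJobOption.Requeue); // assumed types
+     Response response = await batchClient.DisableJobAsync("exampleJob", content);
+ }
+ // ----------------------------------------------------------------------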
+ /// The response returned from the service. + /// + public virtual Response DisableJob(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.DisableJob"); + scope.Start(); + try + { + using HttpMessage message = CreateDisableJobRequest(jobId, content, timeOutInSeconds, ocpdate, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Enables the specified Job, allowing new Tasks to run. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job to enable. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task EnableJobAsync(string jobId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.EnableJob"); + scope.Start(); + try + { + using HttpMessage message = CreateEnableJobRequest(jobId, timeOutInSeconds, ocpdate, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Enables the specified Job, allowing new Tasks to run. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job to enable. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. 
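+ // ----------------------------------------------------------------------
+ // Illustrative usage sketch for EnableJobAsync: re-enabling a disabled
+ // job so new tasks can run. Only the job ID is required. Assumes an
+ // existing BatchClient instance; the job ID is hypothetical.
+ internal static async Task ShowEnableJobAsync(BatchClient batchClient)
+ {
+     Response response = await batchClient.EnableJobAsync("exampleJob");
+ }
+ // ----------------------------------------------------------------------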
+ /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response EnableJob(string jobId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.EnableJob"); + scope.Start(); + try + { + using HttpMessage message = CreateEnableJobRequest(jobId, timeOutInSeconds, ocpdate, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Terminates the specified Job, marking it as completed. + /// The ID of the Job to terminate. + /// The options to use for terminating the Job. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// + /// When a Terminate Job request is received, the Batch service sets the Job to the + /// terminating state. The Batch service then terminates any running Tasks + /// associated with the Job and runs any required Job release Tasks. Then the Job + /// moves into the completed state. If there are any Tasks in the Job in the active + /// state, they will remain in the active state. Once a Job is terminated, new + /// Tasks cannot be added and any remaining active Tasks will not be scheduled. + /// + /// + public virtual async Task TerminateJobAsync(string jobId, BatchJobTerminateContent parameters = null, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + using RequestContent content = parameters?.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await TerminateJobAsync(jobId, content, timeOutInSeconds, ocpdate, requestConditions, context).ConfigureAwait(false); + return response; + } + + /// Terminates the specified Job, marking it as completed. + /// The ID of the Job to terminate. + /// The options to use for terminating the Job. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// + /// When a Terminate Job request is received, the Batch service sets the Job to the + /// terminating state. The Batch service then terminates any running Tasks + /// associated with the Job and runs any required Job release Tasks. Then the Job + /// moves into the completed state. 
If there are any Tasks in the Job in the active + /// state, they will remain in the active state. Once a Job is terminated, new + /// Tasks cannot be added and any remaining active Tasks will not be scheduled. + /// + /// + public virtual Response TerminateJob(string jobId, BatchJobTerminateContent parameters = null, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + using RequestContent content = parameters?.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = TerminateJob(jobId, content, timeOutInSeconds, ocpdate, requestConditions, context); + return response; + } + + /// + /// [Protocol Method] Terminates the specified Job, marking it as completed. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job to terminate. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task TerminateJobAsync(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.TerminateJob"); + scope.Start(); + try + { + using HttpMessage message = CreateTerminateJobRequest(jobId, content, timeOutInSeconds, ocpdate, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Terminates the specified Job, marking it as completed. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job to terminate. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. 
+ /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response TerminateJob(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.TerminateJob"); + scope.Start(); + try + { + using HttpMessage message = CreateTerminateJobRequest(jobId, content, timeOutInSeconds, ocpdate, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Creates a Job to the specified Account. + /// The Job to be created. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// is null. + /// + /// The Batch service supports two ways to control the work done as part of a Job. + /// In the first approach, the user specifies a Job Manager Task. The Batch service + /// launches this Task when it is ready to start the Job. The Job Manager Task + /// controls all other Tasks that run under this Job, by using the Task APIs. In + /// the second approach, the user directly controls the execution of Tasks under an + /// active Job, by using the Task APIs. Also note: when naming Jobs, avoid + /// including sensitive information such as user names or secret project names. + /// This information may appear in telemetry logs accessible to Microsoft Support + /// engineers. + /// + /// + public virtual async Task CreateJobAsync(BatchJobCreateContent job, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNull(job, nameof(job)); + + using RequestContent content = job.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await CreateJobAsync(content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + return response; + } + + /// Creates a Job to the specified Account. + /// The Job to be created. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// is null. + /// + /// The Batch service supports two ways to control the work done as part of a Job. + /// In the first approach, the user specifies a Job Manager Task. The Batch service + /// launches this Task when it is ready to start the Job. The Job Manager Task + /// controls all other Tasks that run under this Job, by using the Task APIs. 
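A sketch of the convenience overload, assuming `BatchJobCreateContent` takes the job ID plus a `BatchPoolInfo` (a constructor shape assumed here, not shown in this diff); the pool ID is a placeholder.

    // Create a job bound to an existing pool.
    BatchJobCreateContent job = new BatchJobCreateContent(
        "myJob",
        new BatchPoolInfo { PoolId = "myPool" });
    Response response = await client.CreateJobAsync(job);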
In + /// the second approach, the user directly controls the execution of Tasks under an + /// active Job, by using the Task APIs. Also note: when naming Jobs, avoid + /// including sensitive information such as user names or secret project names. + /// This information may appear in telemetry logs accessible to Microsoft Support + /// engineers. + /// + /// + public virtual Response CreateJob(BatchJobCreateContent job, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNull(job, nameof(job)); + + using RequestContent content = job.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = CreateJob(content, timeOutInSeconds, ocpdate, context); + return response; + } + + /// + /// [Protocol Method] Creates a Job to the specified Account. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task CreateJobAsync(RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.CreateJob"); + scope.Start(); + try + { + using HttpMessage message = CreateCreateJobRequest(content, timeOutInSeconds, ocpdate, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Creates a Job to the specified Account. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response CreateJob(RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.CreateJob"); + scope.Start(); + try + { + using HttpMessage message = CreateCreateJobRequest(content, timeOutInSeconds, ocpdate, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Gets the Task counts for the specified Job. + /// The ID of the Job. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// + /// Task counts provide a count of the Tasks by active, running or completed Task + /// state, and a count of Tasks which succeeded or failed. Tasks in the preparing + /// state are counted as running. Note that the numbers returned may not always be + /// up to date. If you need exact task counts, use a list query. + /// + /// + public virtual async Task> GetJobTaskCountsAsync(string jobId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await GetJobTaskCountsAsync(jobId, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + return Response.FromValue(BatchTaskCountsResult.FromResponse(response), response); + } + + /// Gets the Task counts for the specified Job. + /// The ID of the Job. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// + /// Task counts provide a count of the Tasks by active, running or completed Task + /// state, and a count of Tasks which succeeded or failed. Tasks in the preparing + /// state are counted as running. Note that the numbers returned may not always be + /// up to date. If you need exact task counts, use a list query. + /// + /// + public virtual Response GetJobTaskCounts(string jobId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + RequestContext context = FromCancellationToken(cancellationToken); + Response response = GetJobTaskCounts(jobId, timeOutInSeconds, ocpdate, context); + return Response.FromValue(BatchTaskCountsResult.FromResponse(response), response); + } + + /// + /// [Protocol Method] Gets the Task counts for the specified Job. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. 
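A usage sketch for the convenience overload; the model property names (`TaskCounts`, `Active`, etc.) are assumed from the referenced types. As the remarks note, tasks in the preparing state count as running and the numbers may lag, so use a list query when exact counts are required.

    Response<BatchTaskCountsResult> counts = await client.GetJobTaskCountsAsync("myJob");
    BatchTaskCounts t = counts.Value.TaskCounts;
    Console.WriteLine($"active={t.Active} running={t.Running} completed={t.Completed} failed={t.Failed}");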
+ /// + /// + /// + /// + /// The ID of the Job. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task GetJobTaskCountsAsync(string jobId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetJobTaskCounts"); + scope.Start(); + try + { + using HttpMessage message = CreateGetJobTaskCountsRequest(jobId, timeOutInSeconds, ocpdate, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Gets the Task counts for the specified Job. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response GetJobTaskCounts(string jobId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetJobTaskCounts"); + scope.Start(); + try + { + using HttpMessage message = CreateGetJobTaskCountsRequest(jobId, timeOutInSeconds, ocpdate, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Deletes a Job Schedule from the specified Account. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job Schedule to delete. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. 
Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task DeleteJobScheduleAsync(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteJobSchedule"); + scope.Start(); + try + { + using HttpMessage message = CreateDeleteJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Deletes a Job Schedule from the specified Account. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job Schedule to delete. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response DeleteJobSchedule(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteJobSchedule"); + scope.Start(); + try + { + using HttpMessage message = CreateDeleteJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Gets information about the specified Job Schedule. + /// The ID of the Job Schedule to get. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// An OData $select clause. + /// An OData $expand clause. 
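Delete has no convenience overload (same parameter list as the protocol method), so the call returns the raw response; the schedule ID is a placeholder.

    Response response = await client.DeleteJobScheduleAsync("mySchedule");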
+ /// The content to send as the request conditions of the request. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// + public virtual async Task> GetJobScheduleAsync(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable select = null, IEnumerable expand = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); + + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await GetJobScheduleAsync(jobScheduleId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context).ConfigureAwait(false); + return Response.FromValue(BatchJobSchedule.FromResponse(response), response); + } + + /// Gets information about the specified Job Schedule. + /// The ID of the Job Schedule to get. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// An OData $select clause. + /// An OData $expand clause. + /// The content to send as the request conditions of the request. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// + public virtual Response GetJobSchedule(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable select = null, IEnumerable expand = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); + + RequestContext context = FromCancellationToken(cancellationToken); + Response response = GetJobSchedule(jobScheduleId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); + return Response.FromValue(BatchJobSchedule.FromResponse(response), response); + } + + /// + /// [Protocol Method] Gets information about the specified Job Schedule. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job Schedule to get. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// An OData $select clause. + /// An OData $expand clause. + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task GetJobScheduleAsync(string jobScheduleId, int? timeOutInSeconds, DateTimeOffset? 
ocpdate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) + { + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetJobSchedule"); + scope.Start(); + try + { + using HttpMessage message = CreateGetJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Gets information about the specified Job Schedule. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job Schedule to get. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// An OData $select clause. + /// An OData $expand clause. + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response GetJobSchedule(string jobScheduleId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) + { + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetJobSchedule"); + scope.Start(); + try + { + using HttpMessage message = CreateGetJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Updates the properties of the specified Job Schedule. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job Schedule to update. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
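A sketch of the convenience overload using the OData projection parameters; the selected property paths are illustrative, and `select`/`expand` are assumed to take sequences of strings.

    // Trim the payload to the properties you need.
    BatchJobSchedule schedule = await client.GetJobScheduleAsync(
        "mySchedule",
        select: new[] { "id", "state" },   // OData $select
        expand: new[] { "stats" });        // OData $expand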
+ /// + public virtual async Task UpdateJobScheduleAsync(string jobScheduleId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.UpdateJobSchedule"); + scope.Start(); + try + { + using HttpMessage message = CreateUpdateJobScheduleRequest(jobScheduleId, content, timeOutInSeconds, ocpdate, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Updates the properties of the specified Job Schedule. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job Schedule to update. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response UpdateJobSchedule(string jobScheduleId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.UpdateJobSchedule"); + scope.Start(); + try + { + using HttpMessage message = CreateUpdateJobScheduleRequest(jobScheduleId, content, timeOutInSeconds, ocpdate, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Updates the properties of the specified Job Schedule. + /// The ID of the Job Schedule to update. + /// A Job Schedule with updated properties. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The cancellation token to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// + /// This fully replaces all the updatable properties of the Job Schedule. For + /// example, if the schedule property is not specified with this request, then the + /// Batch service will remove the existing schedule. 
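UpdateJobSchedule is protocol-only, so the partial-update body is raw JSON wrapped in `RequestContent`. The payload shape below follows the Batch REST patch body; the property names and values are illustrative assumptions.

    using Azure.Core; // RequestContent

    // Partial update: only the fields present in the body are changed.
    RequestContent patch = RequestContent.Create(BinaryData.FromObjectAsJson(new
    {
        metadata = new[] { new { name = "owner", value = "data-platform" } }
    }));
    Response response = await client.UpdateJobScheduleAsync("mySchedule", patch);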
Changes to a Job Schedule only + /// impact Jobs created by the schedule after the update has taken place; currently + /// running Jobs are unaffected. + /// + /// + public virtual async Task ReplaceJobScheduleAsync(string jobScheduleId, BatchJobSchedule jobSchedule, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); + Argument.AssertNotNull(jobSchedule, nameof(jobSchedule)); + + using RequestContent content = jobSchedule.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await ReplaceJobScheduleAsync(jobScheduleId, content, timeOutInSeconds, ocpdate, requestConditions, context).ConfigureAwait(false); + return response; + } + + /// Updates the properties of the specified Job Schedule. + /// The ID of the Job Schedule to update. + /// A Job Schedule with updated properties. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The cancellation token to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// + /// This fully replaces all the updatable properties of the Job Schedule. For + /// example, if the schedule property is not specified with this request, then the + /// Batch service will remove the existing schedule. Changes to a Job Schedule only + /// impact Jobs created by the schedule after the update has taken place; currently + /// running Jobs are unaffected. + /// + /// + public virtual Response ReplaceJobSchedule(string jobScheduleId, BatchJobSchedule jobSchedule, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); + Argument.AssertNotNull(jobSchedule, nameof(jobSchedule)); + + using RequestContent content = jobSchedule.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = ReplaceJobSchedule(jobScheduleId, content, timeOutInSeconds, ocpdate, requestConditions, context); + return response; + } + + /// + /// [Protocol Method] Updates the properties of the specified Job Schedule. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job Schedule to update. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. 
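Because ReplaceJobSchedule overwrites every updatable property (the remarks note an omitted schedule property is removed on the service), the safe pattern is read-modify-write:

    // Fetch the current schedule, mutate what you need, then send the whole object back.
    BatchJobSchedule schedule = await client.GetJobScheduleAsync("mySchedule");
    await client.ReplaceJobScheduleAsync("mySchedule", schedule);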
+ /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task ReplaceJobScheduleAsync(string jobScheduleId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.ReplaceJobSchedule"); + scope.Start(); + try + { + using HttpMessage message = CreateReplaceJobScheduleRequest(jobScheduleId, content, timeOutInSeconds, ocpdate, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Updates the properties of the specified Job Schedule. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job Schedule to update. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response ReplaceJobSchedule(string jobScheduleId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.ReplaceJobSchedule"); + scope.Start(); + try + { + using HttpMessage message = CreateReplaceJobScheduleRequest(jobScheduleId, content, timeOutInSeconds, ocpdate, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Disables a Job Schedule. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job Schedule to disable. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". 
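The `requestConditions` parameter seen throughout these methods maps to standard HTTP preconditions via Azure.Core's `RequestConditions`. A sketch of an optimistic-concurrency guard; the ETag value is a placeholder that would come from a prior response.

    // Only disable the schedule if it is unchanged since we last read it (If-Match).
    RequestConditions conditions = new RequestConditions { IfMatch = new ETag("<etag-from-prior-read>") };
    Response response = await client.DisableJobScheduleAsync("mySchedule", requestConditions: conditions);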
+ /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task DisableJobScheduleAsync(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.DisableJobSchedule"); + scope.Start(); + try + { + using HttpMessage message = CreateDisableJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Disables a Job Schedule. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job Schedule to disable. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response DisableJobSchedule(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.DisableJobSchedule"); + scope.Start(); + try + { + using HttpMessage message = CreateDisableJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Enables a Job Schedule. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job Schedule to enable. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used instead. + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task EnableJobScheduleAsync(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.EnableJobSchedule"); + scope.Start(); + try + { + using HttpMessage message = CreateEnableJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Enables a Job Schedule. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job Schedule to enable. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead. + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response EnableJobSchedule(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.EnableJobSchedule"); + scope.Start(); + try + { + using HttpMessage message = CreateEnableJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Terminates a Job Schedule. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job Schedule to terminate. + /// The maximum time that the server can spend processing the request, in seconds.
The default is 30 seconds. If the value is larger than 30, the default will be used instead. + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task TerminateJobScheduleAsync(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.TerminateJobSchedule"); + scope.Start(); + try + { + using HttpMessage message = CreateTerminateJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Terminates a Job Schedule. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job Schedule to terminate. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead. + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response TerminateJobSchedule(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.TerminateJobSchedule"); + scope.Start(); + try + { + using HttpMessage message = CreateTerminateJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Creates a Job Schedule to the specified Account. + /// The Job Schedule to be created. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead. + /// + /// The time the request was issued.
Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// is null. + /// + public virtual async Task CreateJobScheduleAsync(BatchJobScheduleCreateContent jobSchedule, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNull(jobSchedule, nameof(jobSchedule)); + + using RequestContent content = jobSchedule.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await CreateJobScheduleAsync(content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + return response; + } + + /// Creates a Job Schedule to the specified Account. + /// The Job Schedule to be created. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// is null. + /// + public virtual Response CreateJobSchedule(BatchJobScheduleCreateContent jobSchedule, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNull(jobSchedule, nameof(jobSchedule)); + + using RequestContent content = jobSchedule.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = CreateJobSchedule(content, timeOutInSeconds, ocpdate, context); + return response; + } + + /// + /// [Protocol Method] Creates a Job Schedule to the specified Account. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task CreateJobScheduleAsync(RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.CreateJobSchedule"); + scope.Start(); + try + { + using HttpMessage message = CreateCreateJobScheduleRequest(content, timeOutInSeconds, ocpdate, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Creates a Job Schedule to the specified Account. 
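A sketch using the protocol overload with a raw JSON body instead of the strongly typed model. The payload shape follows the Batch REST "add job schedule" body; the IDs and recurrence interval are placeholders.

    RequestContent body = RequestContent.Create(BinaryData.FromObjectAsJson(new
    {
        id = "mySchedule",
        schedule = new { recurrenceInterval = "PT1H" },               // ISO 8601 duration
        jobSpecification = new { poolInfo = new { poolId = "myPool" } }
    }));
    Response response = await client.CreateJobScheduleAsync(body);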
+ /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response CreateJobSchedule(RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.CreateJobSchedule"); + scope.Start(); + try + { + using HttpMessage message = CreateCreateJobScheduleRequest(content, timeOutInSeconds, ocpdate, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Creates a Task to the specified Job. + /// The ID of the Job to which the Task is to be created. + /// The Task to be created. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// + /// The maximum lifetime of a Task from addition to completion is 180 days. If a + /// Task has not completed within 180 days of being added it will be terminated by + /// the Batch service and left in whatever state it was in at that time. + /// + /// + public virtual async Task CreateTaskAsync(string jobId, BatchTaskCreateContent task, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNull(task, nameof(task)); + + using RequestContent content = task.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await CreateTaskAsync(jobId, content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + return response; + } + + /// Creates a Task to the specified Job. + /// The ID of the Job to which the Task is to be created. + /// The Task to be created. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// or is null. 
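A minimal convenience-overload sketch; `BatchTaskCreateContent(id, commandLine)` is the assumed constructor shape, and the command line is a placeholder.

    await client.CreateTaskAsync("myJob", new BatchTaskCreateContent("task1", "/bin/sh -c 'echo hello'"));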
+ /// is an empty string, and was expected to be non-empty. + /// + /// The maximum lifetime of a Task from addition to completion is 180 days. If a + /// Task has not completed within 180 days of being added it will be terminated by + /// the Batch service and left in whatever state it was in at that time. + /// + /// + public virtual Response CreateTask(string jobId, BatchTaskCreateContent task, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNull(task, nameof(task)); + + using RequestContent content = task.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = CreateTask(jobId, content, timeOutInSeconds, ocpdate, context); + return response; + } + + /// + /// [Protocol Method] Creates a Task to the specified Job. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job to which the Task is to be created. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task CreateTaskAsync(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.CreateTask"); + scope.Start(); + try + { + using HttpMessage message = CreateCreateTaskRequest(jobId, content, timeOutInSeconds, ocpdate, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Creates a Task to the specified Job. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job to which the Task is to be created. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. 
+ /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response CreateTask(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.CreateTask"); + scope.Start(); + try + { + using HttpMessage message = CreateCreateTaskRequest(jobId, content, timeOutInSeconds, ocpdate, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Adds a collection of Tasks to the specified Job. + /// The ID of the Job to which the Task collection is to be added. + /// The Tasks to be added. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// + /// Note that each Task must have a unique ID. The Batch service may not return the + /// results for each Task in the same order the Tasks were submitted in this + /// request. If the server times out or the connection is closed during the + /// request, the request may have been partially or fully processed, or not at all. + /// In such cases, the user should re-issue the request. Note that it is up to the + /// user to correctly handle failures when re-issuing a request. For example, you + /// should use the same Task IDs during a retry so that if the prior operation + /// succeeded, the retry will not create extra Tasks unexpectedly. If the response + /// contains any Tasks which failed to add, a client can retry the request. In a + /// retry, it is most efficient to resubmit only Tasks that failed to add, and to + /// omit Tasks that were successfully added on the first attempt. The maximum + /// lifetime of a Task from addition to completion is 180 days. If a Task has not + /// completed within 180 days of being added it will be terminated by the Batch + /// service and left in whatever state it was in at that time. + /// + /// + public virtual async Task> CreateTaskCollectionAsync(string jobId, BatchTaskGroup taskCollection, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNull(taskCollection, nameof(taskCollection)); + + using RequestContent content = taskCollection.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await CreateTaskCollectionAsync(jobId, content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + return Response.FromValue(BatchTaskAddCollectionResult.FromResponse(response), response); + } + + /// Adds a collection of Tasks to the specified Job. + /// The ID of the Job to which the Task collection is to be added. 
+ /// The Tasks to be added. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// + /// Note that each Task must have a unique ID. The Batch service may not return the + /// results for each Task in the same order the Tasks were submitted in this + /// request. If the server times out or the connection is closed during the + /// request, the request may have been partially or fully processed, or not at all. + /// In such cases, the user should re-issue the request. Note that it is up to the + /// user to correctly handle failures when re-issuing a request. For example, you + /// should use the same Task IDs during a retry so that if the prior operation + /// succeeded, the retry will not create extra Tasks unexpectedly. If the response + /// contains any Tasks which failed to add, a client can retry the request. In a + /// retry, it is most efficient to resubmit only Tasks that failed to add, and to + /// omit Tasks that were successfully added on the first attempt. The maximum + /// lifetime of a Task from addition to completion is 180 days. If a Task has not + /// completed within 180 days of being added it will be terminated by the Batch + /// service and left in whatever state it was in at that time. + /// + /// + public virtual Response CreateTaskCollection(string jobId, BatchTaskGroup taskCollection, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNull(taskCollection, nameof(taskCollection)); + + using RequestContent content = taskCollection.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = CreateTaskCollection(jobId, content, timeOutInSeconds, ocpdate, context); + return Response.FromValue(BatchTaskAddCollectionResult.FromResponse(response), response); + } + + /// + /// [Protocol Method] Adds a collection of Tasks to the specified Job. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job to which the Task collection is to be added. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. 
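+ /// Example (an illustrative sketch; assumes an authenticated BatchClient named "client"
+ /// and an existing Job "job-1"; the anonymous-object payload mirrors the REST request
+ /// body for adding a Task collection and is an assumption, not generator output):
+ /// <code>
+ /// using RequestContent content = RequestContent.Create(new
+ /// {
+ ///     value = new[]
+ ///     {
+ ///         new { id = "task-1", commandLine = "cmd /c echo first" },
+ ///         new { id = "task-2", commandLine = "cmd /c echo second" },
+ ///     },
+ /// });
+ /// Response response = await client.CreateTaskCollectionAsync("job-1", content);
+ /// </code>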
+ /// + public virtual async Task CreateTaskCollectionAsync(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.CreateTaskCollection"); + scope.Start(); + try + { + using HttpMessage message = CreateCreateTaskCollectionRequest(jobId, content, timeOutInSeconds, ocpdate, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Adds a collection of Tasks to the specified Job. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job to which the Task collection is to be added. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response CreateTaskCollection(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.CreateTaskCollection"); + scope.Start(); + try + { + using HttpMessage message = CreateCreateTaskCollectionRequest(jobId, content, timeOutInSeconds, ocpdate, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Deletes a Task from the specified Job. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job from which to delete the Task. + /// The ID of the Task to delete. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. 
+ /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task DeleteTaskAsync(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteTask"); + scope.Start(); + try + { + using HttpMessage message = CreateDeleteTaskRequest(jobId, taskId, timeOutInSeconds, ocpdate, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Deletes a Task from the specified Job. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job from which to delete the Task. + /// The ID of the Task to delete. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response DeleteTask(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteTask"); + scope.Start(); + try + { + using HttpMessage message = CreateDeleteTaskRequest(jobId, taskId, timeOutInSeconds, ocpdate, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Gets information about the specified Task. + /// The ID of the Job that contains the Task. + /// The ID of the Task to get information about. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// An OData $select clause. + /// An OData $expand clause. + /// The content to send as the request conditions of the request. + /// The cancellation token to use. + /// or is null. + /// or is an empty string, and was expected to be non-empty. 
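+ /// Example (an illustrative sketch; assumes an authenticated BatchClient named "client"
+ /// and an existing Task "task-1" in Job "job-1"; the returned response converts
+ /// implicitly to the BatchTask model):
+ /// <code>
+ /// BatchTask task = await client.GetTaskAsync("job-1", "task-1");
+ /// Console.WriteLine(task.State);
+ /// </code>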
+ ///
+ /// For multi-instance Tasks, information such as affinityId, executionInfo and
+ /// nodeInfo refer to the primary Task. Use the list subtasks API to retrieve
+ /// information about subtasks.
+ ///
+ ///
+ public virtual async Task<Response<BatchTask>> GetTaskAsync(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable<string> select = null, IEnumerable<string> expand = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default)
+ {
+     Argument.AssertNotNullOrEmpty(jobId, nameof(jobId));
+     Argument.AssertNotNullOrEmpty(taskId, nameof(taskId));
+
+     RequestContext context = FromCancellationToken(cancellationToken);
+     Response response = await GetTaskAsync(jobId, taskId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context).ConfigureAwait(false);
+     return Response.FromValue(BatchTask.FromResponse(response), response);
+ }
+
+ /// Gets information about the specified Task.
+ /// The ID of the Job that contains the Task.
+ /// The ID of the Task to get information about.
+ /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.
+ ///
+ /// The time the request was issued. Client libraries typically set this to the
+ /// current system clock time; set it explicitly if you are calling the REST API
+ /// directly.
+ ///
+ /// An OData $select clause.
+ /// An OData $expand clause.
+ /// The content to send as the request conditions of the request.
+ /// The cancellation token to use.
+ /// jobId or taskId is null.
+ /// jobId or taskId is an empty string, and was expected to be non-empty.
+ ///
+ /// For multi-instance Tasks, information such as affinityId, executionInfo and
+ /// nodeInfo refer to the primary Task. Use the list subtasks API to retrieve
+ /// information about subtasks.
+ ///
+ ///
+ public virtual Response<BatchTask> GetTask(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable<string> select = null, IEnumerable<string> expand = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default)
+ {
+     Argument.AssertNotNullOrEmpty(jobId, nameof(jobId));
+     Argument.AssertNotNullOrEmpty(taskId, nameof(taskId));
+
+     RequestContext context = FromCancellationToken(cancellationToken);
+     Response response = GetTask(jobId, taskId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context);
+     return Response.FromValue(BatchTask.FromResponse(response), response);
+ }
+
+ ///
+ /// [Protocol Method] Gets information about the specified Task.
+ ///
+ ///
+ ///
+ /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+ ///
+ ///
+ ///
+ ///
+ /// Please try the simpler convenience overload with strongly typed models first.
+ ///
+ ///
+ ///
+ ///
+ /// The ID of the Job that contains the Task.
+ /// The ID of the Task to get information about.
+ /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.
+ ///
+ /// The time the request was issued. Client libraries typically set this to the
+ /// current system clock time; set it explicitly if you are calling the REST API
+ /// directly.
+ ///
+ /// An OData $select clause.
+ /// An OData $expand clause.
+ /// The content to send as the request conditions of the request.
+ /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task GetTaskAsync(string jobId, string taskId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetTask"); + scope.Start(); + try + { + using HttpMessage message = CreateGetTaskRequest(jobId, taskId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Gets information about the specified Task. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job that contains the Task. + /// The ID of the Task to get information about. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// An OData $select clause. + /// An OData $expand clause. + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response GetTask(string jobId, string taskId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetTask"); + scope.Start(); + try + { + using HttpMessage message = CreateGetTaskRequest(jobId, taskId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Updates the properties of the specified Task. + /// The ID of the Job containing the Task. + /// The ID of the Task to update. + /// The Task to update. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. 
+ /// The cancellation token to use. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + /// + public virtual async Task ReplaceTaskAsync(string jobId, string taskId, BatchTask task, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + Argument.AssertNotNull(task, nameof(task)); + + using RequestContent content = task.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await ReplaceTaskAsync(jobId, taskId, content, timeOutInSeconds, ocpdate, requestConditions, context).ConfigureAwait(false); + return response; + } + + /// Updates the properties of the specified Task. + /// The ID of the Job containing the Task. + /// The ID of the Task to update. + /// The Task to update. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The cancellation token to use. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + /// + public virtual Response ReplaceTask(string jobId, string taskId, BatchTask task, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + Argument.AssertNotNull(task, nameof(task)); + + using RequestContent content = task.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = ReplaceTask(jobId, taskId, content, timeOutInSeconds, ocpdate, requestConditions, context); + return response; + } + + /// + /// [Protocol Method] Updates the properties of the specified Task. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job containing the Task. + /// The ID of the Task to update. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task ReplaceTaskAsync(string jobId, string taskId, RequestContent content, int? 
timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.ReplaceTask"); + scope.Start(); + try + { + using HttpMessage message = CreateReplaceTaskRequest(jobId, taskId, content, timeOutInSeconds, ocpdate, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Updates the properties of the specified Task. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job containing the Task. + /// The ID of the Task to update. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response ReplaceTask(string jobId, string taskId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.ReplaceTask"); + scope.Start(); + try + { + using HttpMessage message = CreateReplaceTaskRequest(jobId, taskId, content, timeOutInSeconds, ocpdate, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Terminates the specified Task. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job containing the Task. + /// The ID of the Task to terminate. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. 
+ /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task TerminateTaskAsync(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.TerminateTask"); + scope.Start(); + try + { + using HttpMessage message = CreateTerminateTaskRequest(jobId, taskId, timeOutInSeconds, ocpdate, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Terminates the specified Task. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job containing the Task. + /// The ID of the Task to terminate. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response TerminateTask(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.TerminateTask"); + scope.Start(); + try + { + using HttpMessage message = CreateTerminateTaskRequest(jobId, taskId, timeOutInSeconds, ocpdate, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Reactivates a Task, allowing it to run again even if its retry count has been + /// exhausted. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job containing the Task. + /// The ID of the Task to reactivate. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". 
+ /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task ReactivateTaskAsync(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.ReactivateTask"); + scope.Start(); + try + { + using HttpMessage message = CreateReactivateTaskRequest(jobId, taskId, timeOutInSeconds, ocpdate, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Reactivates a Task, allowing it to run again even if its retry count has been + /// exhausted. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job containing the Task. + /// The ID of the Task to reactivate. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response ReactivateTask(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.ReactivateTask"); + scope.Start(); + try + { + using HttpMessage message = CreateReactivateTaskRequest(jobId, taskId, timeOutInSeconds, ocpdate, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Deletes the specified Task file from the Compute Node where the Task ran. 
+ ///
+ ///
+ ///
+ /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+ ///
+ ///
+ ///
+ ///
+ /// The ID of the Job that contains the Task.
+ /// The ID of the Task whose file you want to delete.
+ /// The path to the Task file that you want to delete.
+ /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.
+ ///
+ /// The time the request was issued. Client libraries typically set this to the
+ /// current system clock time; set it explicitly if you are calling the REST API
+ /// directly.
+ ///
+ ///
+ /// Whether to delete children of a directory. If the filePath parameter represents
+ /// a directory instead of a file, you can set recursive to true to delete the
+ /// directory and all of the files and subdirectories in it. If recursive is false
+ /// then the directory must be empty or deletion will fail.
+ ///
+ /// The request context, which can override default behaviors of the client pipeline on a per-call basis.
+ /// jobId, taskId or filePath is null.
+ /// jobId, taskId or filePath is an empty string, and was expected to be non-empty.
+ /// Service returned a non-success status code.
+ /// The response returned from the service.
+ ///
+ public virtual async Task<Response> DeleteTaskFileAsync(string jobId, string taskId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, bool? recursive = null, RequestContext context = null)
+ {
+     Argument.AssertNotNullOrEmpty(jobId, nameof(jobId));
+     Argument.AssertNotNullOrEmpty(taskId, nameof(taskId));
+     Argument.AssertNotNullOrEmpty(filePath, nameof(filePath));
+
+     using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteTaskFile");
+     scope.Start();
+     try
+     {
+         using HttpMessage message = CreateDeleteTaskFileRequest(jobId, taskId, filePath, timeOutInSeconds, ocpdate, recursive, context);
+         return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false);
+     }
+     catch (Exception e)
+     {
+         scope.Failed(e);
+         throw;
+     }
+ }
+
+ // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method
+ ///
+ /// [Protocol Method] Deletes the specified Task file from the Compute Node where the Task ran.
+ ///
+ ///
+ ///
+ /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+ ///
+ ///
+ ///
+ ///
+ /// The ID of the Job that contains the Task.
+ /// The ID of the Task whose file you want to delete.
+ /// The path to the Task file that you want to delete.
+ /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.
+ ///
+ /// The time the request was issued. Client libraries typically set this to the
+ /// current system clock time; set it explicitly if you are calling the REST API
+ /// directly.
+ ///
+ ///
+ /// Whether to delete children of a directory. If the filePath parameter represents
+ /// a directory instead of a file, you can set recursive to true to delete the
+ /// directory and all of the files and subdirectories in it. If recursive is false
+ /// then the directory must be empty or deletion will fail.
+ ///
+ /// The request context, which can override default behaviors of the client pipeline on a per-call basis.
+ /// jobId, taskId or filePath is null.
+ /// , or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response DeleteTaskFile(string jobId, string taskId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, bool? recursive = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteTaskFile"); + scope.Start(); + try + { + using HttpMessage message = CreateDeleteTaskFileRequest(jobId, taskId, filePath, timeOutInSeconds, ocpdate, recursive, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Returns the content of the specified Task file. + /// The ID of the Job that contains the Task. + /// The ID of the Task whose file you want to retrieve. + /// The path to the Task file that you want to get the content of. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The byte range to be retrieved. The default is to retrieve the entire file. The + /// format is bytes=startRange-endRange. + /// + /// The content to send as the request conditions of the request. + /// The cancellation token to use. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// + public virtual async Task> GetTaskFileAsync(string jobId, string taskId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, string ocpRange = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); + + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await GetTaskFileAsync(jobId, taskId, filePath, timeOutInSeconds, ocpdate, ocpRange, requestConditions, context).ConfigureAwait(false); + return Response.FromValue(response.Content, response); + } + + /// Returns the content of the specified Task file. + /// The ID of the Job that contains the Task. + /// The ID of the Task whose file you want to retrieve. + /// The path to the Task file that you want to get the content of. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The byte range to be retrieved. The default is to retrieve the entire file. The + /// format is bytes=startRange-endRange. + /// + /// The content to send as the request conditions of the request. + /// The cancellation token to use. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. 
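+ /// Example (an illustrative sketch; assumes an authenticated BatchClient named "client"
+ /// and a Task "task-1" in Job "job-1" that has written stdout.txt; the ocpRange value
+ /// follows the bytes=startRange-endRange format described above):
+ /// <code>
+ /// BinaryData firstKilobyte = client.GetTaskFile("job-1", "task-1", "stdout.txt", ocpRange: "bytes=0-1023");
+ /// Console.WriteLine(firstKilobyte.ToString());
+ /// </code>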
+ /// + public virtual Response GetTaskFile(string jobId, string taskId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, string ocpRange = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); + + RequestContext context = FromCancellationToken(cancellationToken); + Response response = GetTaskFile(jobId, taskId, filePath, timeOutInSeconds, ocpdate, ocpRange, requestConditions, context); + return Response.FromValue(response.Content, response); + } + + /// + /// [Protocol Method] Returns the content of the specified Task file. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job that contains the Task. + /// The ID of the Task whose file you want to retrieve. + /// The path to the Task file that you want to get the content of. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The byte range to be retrieved. The default is to retrieve the entire file. The + /// format is bytes=startRange-endRange. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task GetTaskFileAsync(string jobId, string taskId, string filePath, int? timeOutInSeconds, DateTimeOffset? ocpdate, string ocpRange, RequestConditions requestConditions, RequestContext context) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); + + if (requestConditions?.IfMatch is not null) + { + throw new ArgumentNullException(nameof(requestConditions), "Service does not support the If-Match header for this operation."); + } + if (requestConditions?.IfNoneMatch is not null) + { + throw new ArgumentNullException(nameof(requestConditions), "Service does not support the If-None-Match header for this operation."); + } + + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetTaskFile"); + scope.Start(); + try + { + using HttpMessage message = CreateGetTaskFileRequest(jobId, taskId, filePath, timeOutInSeconds, ocpdate, ocpRange, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Returns the content of the specified Task file. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. 
+ /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job that contains the Task. + /// The ID of the Task whose file you want to retrieve. + /// The path to the Task file that you want to get the content of. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The byte range to be retrieved. The default is to retrieve the entire file. The + /// format is bytes=startRange-endRange. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response GetTaskFile(string jobId, string taskId, string filePath, int? timeOutInSeconds, DateTimeOffset? ocpdate, string ocpRange, RequestConditions requestConditions, RequestContext context) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); + + if (requestConditions?.IfMatch is not null) + { + throw new ArgumentNullException(nameof(requestConditions), "Service does not support the If-Match header for this operation."); + } + if (requestConditions?.IfNoneMatch is not null) + { + throw new ArgumentNullException(nameof(requestConditions), "Service does not support the If-None-Match header for this operation."); + } + + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetTaskFile"); + scope.Start(); + try + { + using HttpMessage message = CreateGetTaskFileRequest(jobId, taskId, filePath, timeOutInSeconds, ocpdate, ocpRange, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Gets the properties of the specified Task file. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job that contains the Task. + /// The ID of the Task whose file you want to retrieve. + /// The path to the Task file that you want to get the content of. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. 
+ /// The response returned from the service. + internal virtual async Task GetTaskFilePropertiesInternalAsync(string jobId, string taskId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); + + if (requestConditions?.IfMatch is not null) + { + throw new ArgumentNullException(nameof(requestConditions), "Service does not support the If-Match header for this operation."); + } + if (requestConditions?.IfNoneMatch is not null) + { + throw new ArgumentNullException(nameof(requestConditions), "Service does not support the If-None-Match header for this operation."); + } + + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetTaskFilePropertiesInternal"); + scope.Start(); + try + { + using HttpMessage message = CreateGetTaskFilePropertiesInternalRequest(jobId, taskId, filePath, timeOutInSeconds, ocpdate, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Gets the properties of the specified Task file. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job that contains the Task. + /// The ID of the Task whose file you want to retrieve. + /// The path to the Task file that you want to get the content of. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + internal virtual Response GetTaskFilePropertiesInternal(string jobId, string taskId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); + + if (requestConditions?.IfMatch is not null) + { + throw new ArgumentNullException(nameof(requestConditions), "Service does not support the If-Match header for this operation."); + } + if (requestConditions?.IfNoneMatch is not null) + { + throw new ArgumentNullException(nameof(requestConditions), "Service does not support the If-None-Match header for this operation."); + } + + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetTaskFilePropertiesInternal"); + scope.Start(); + try + { + using HttpMessage message = CreateGetTaskFilePropertiesInternalRequest(jobId, taskId, filePath, timeOutInSeconds, ocpdate, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Adds a user Account to the specified Compute Node. + /// The ID of the Pool that contains the Compute Node. + /// The ID of the machine on which you want to create a user Account. + /// The options to use for creating the user. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + /// + /// You can add a user Account to a Compute Node only when it is in the idle or + /// running state. + /// + /// + public virtual async Task CreateNodeUserAsync(string poolId, string nodeId, BatchNodeUserCreateContent user, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + Argument.AssertNotNull(user, nameof(user)); + + using RequestContent content = user.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await CreateNodeUserAsync(poolId, nodeId, content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + return response; + } + + /// Adds a user Account to the specified Compute Node. + /// The ID of the Pool that contains the Compute Node. + /// The ID of the machine on which you want to create a user Account. + /// The options to use for creating the user. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + /// + /// You can add a user Account to a Compute Node only when it is in the idle or + /// running state. + /// + /// + public virtual Response CreateNodeUser(string poolId, string nodeId, BatchNodeUserCreateContent user, int? 
timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + Argument.AssertNotNull(user, nameof(user)); + + using RequestContent content = user.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = CreateNodeUser(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + return response; + } + + /// + /// [Protocol Method] Adds a user Account to the specified Compute Node. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool that contains the Compute Node. + /// The ID of the machine on which you want to create a user Account. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task CreateNodeUserAsync(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.CreateNodeUser"); + scope.Start(); + try + { + using HttpMessage message = CreateCreateNodeUserRequest(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Adds a user Account to the specified Compute Node. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool that contains the Compute Node. + /// The ID of the machine on which you want to create a user Account. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. 
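+ /// Example (an illustrative sketch; assumes an authenticated BatchClient named "client"
+ /// and a Pool "pool-1" containing Compute Node "node-1"; the payload fields mirror the
+ /// REST request body for adding a user Account and are an assumption):
+ /// <code>
+ /// using RequestContent content = RequestContent.Create(new
+ /// {
+ ///     name = "taskuser",
+ ///     isAdmin = false,
+ ///     password = "{placeholder-password}",
+ /// });
+ /// Response response = client.CreateNodeUser("pool-1", "node-1", content);
+ /// </code>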
+ /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response CreateNodeUser(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.CreateNodeUser"); + scope.Start(); + try + { + using HttpMessage message = CreateCreateNodeUserRequest(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Deletes a user Account from the specified Compute Node. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Pool that contains the Compute Node. + /// The ID of the machine on which you want to delete a user Account. + /// The name of the user Account to delete. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task DeleteNodeUserAsync(string poolId, string nodeId, string userName, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + Argument.AssertNotNullOrEmpty(userName, nameof(userName)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteNodeUser"); + scope.Start(); + try + { + using HttpMessage message = CreateDeleteNodeUserRequest(poolId, nodeId, userName, timeOutInSeconds, ocpdate, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Deletes a user Account from the specified Compute Node. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Pool that contains the Compute Node. + /// The ID of the machine on which you want to delete a user Account. + /// The name of the user Account to delete. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used instead.
+        ///
+        /// The time the request was issued. Client libraries typically set this to the
+        /// current system clock time; set it explicitly if you are calling the REST API
+        /// directly.
+        ///
+        /// The request context, which can override default behaviors of the client pipeline on a per-call basis.
+        /// , or is null.
+        /// , or is an empty string, and was expected to be non-empty.
+        /// Service returned a non-success status code.
+        /// The response returned from the service.
+        ///
+        public virtual Response DeleteNodeUser(string poolId, string nodeId, string userName, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null)
+        {
+            Argument.AssertNotNullOrEmpty(poolId, nameof(poolId));
+            Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId));
+            Argument.AssertNotNullOrEmpty(userName, nameof(userName));
+
+            using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteNodeUser");
+            scope.Start();
+            try
+            {
+                using HttpMessage message = CreateDeleteNodeUserRequest(poolId, nodeId, userName, timeOutInSeconds, ocpdate, context);
+                return _pipeline.ProcessMessage(message, context);
+            }
+            catch (Exception e)
+            {
+                scope.Failed(e);
+                throw;
+            }
+        }
+
+        /// Updates the password and expiration time of a user Account on the specified Compute Node.
+        /// The ID of the Pool that contains the Compute Node.
+        /// The ID of the machine on which you want to update a user Account.
+        /// The name of the user Account to update.
+        /// The options to use for updating the user.
+        /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.
+        ///
+        /// The time the request was issued. Client libraries typically set this to the
+        /// current system clock time; set it explicitly if you are calling the REST API
+        /// directly.
+        ///
+        /// The cancellation token to use.
+        /// , , or is null.
+        /// , or is an empty string, and was expected to be non-empty.
+        ///
+        /// This operation replaces all of the updatable properties of the Account. For
+        /// example, if the expiryTime element is not specified, the current value is
+        /// replaced with the default value, not left unmodified. You can update a user
+        /// Account on a Compute Node only when it is in the idle or running state.
+        ///
+        ///
+        public virtual async Task ReplaceNodeUserAsync(string poolId, string nodeId, string userName, BatchNodeUserUpdateContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default)
+        {
+            Argument.AssertNotNullOrEmpty(poolId, nameof(poolId));
+            Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId));
+            Argument.AssertNotNullOrEmpty(userName, nameof(userName));
+            Argument.AssertNotNull(content, nameof(content));
+
+            using RequestContent content0 = content.ToRequestContent();
+            RequestContext context = FromCancellationToken(cancellationToken);
+            Response response = await ReplaceNodeUserAsync(poolId, nodeId, userName, content0, timeOutInSeconds, ocpdate, context).ConfigureAwait(false);
+            return response;
+        }
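// Illustrative usage sketch (editor's example, not part of the generated client):
// updating a node user's password with the convenience overload above. Assumes an
// authenticated BatchClient named `client`, and that BatchNodeUserUpdateContent
// exposes settable Password/ExpiryTime properties matching the REST fields the
// remarks mention; both are assumptions.
//
//   var update = new BatchNodeUserUpdateContent
//   {
//       Password = "<new-password>",
//       ExpiryTime = DateTimeOffset.UtcNow.AddDays(7),
//   };
//   Response response = await client.ReplaceNodeUserAsync(
//       "pool-1", "node-1", "exampleUser", update);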
+
+        /// Updates the password and expiration time of a user Account on the specified Compute Node.
+        /// The ID of the Pool that contains the Compute Node.
+        /// The ID of the machine on which you want to update a user Account.
+        /// The name of the user Account to update.
+        /// The options to use for updating the user.
+        /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.
+        ///
+        /// The time the request was issued. Client libraries typically set this to the
+        /// current system clock time; set it explicitly if you are calling the REST API
+        /// directly.
+        ///
+        /// The cancellation token to use.
+        /// , , or is null.
+        /// , or is an empty string, and was expected to be non-empty.
+        ///
+        /// This operation replaces all of the updatable properties of the Account. For
+        /// example, if the expiryTime element is not specified, the current value is
+        /// replaced with the default value, not left unmodified. You can update a user
+        /// Account on a Compute Node only when it is in the idle or running state.
+        ///
+        ///
+        public virtual Response ReplaceNodeUser(string poolId, string nodeId, string userName, BatchNodeUserUpdateContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default)
+        {
+            Argument.AssertNotNullOrEmpty(poolId, nameof(poolId));
+            Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId));
+            Argument.AssertNotNullOrEmpty(userName, nameof(userName));
+            Argument.AssertNotNull(content, nameof(content));
+
+            using RequestContent content0 = content.ToRequestContent();
+            RequestContext context = FromCancellationToken(cancellationToken);
+            Response response = ReplaceNodeUser(poolId, nodeId, userName, content0, timeOutInSeconds, ocpdate, context);
+            return response;
+        }
+
+        ///
+        /// [Protocol Method] Updates the password and expiration time of a user Account on the specified Compute Node.
+        ///
+        ///
+        ///
+        /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+        ///
+        ///
+        ///
+        ///
+        /// Please try the simpler convenience overload with strongly typed models first.
+        ///
+        ///
+        ///
+        ///
+        /// The ID of the Pool that contains the Compute Node.
+        /// The ID of the machine on which you want to update a user Account.
+        /// The name of the user Account to update.
+        /// The content to send as the body of the request.
+        /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.
+        ///
+        /// The time the request was issued. Client libraries typically set this to the
+        /// current system clock time; set it explicitly if you are calling the REST API
+        /// directly.
+        ///
+        /// The request context, which can override default behaviors of the client pipeline on a per-call basis.
+        /// , , or is null.
+        /// , or is an empty string, and was expected to be non-empty.
+        /// Service returned a non-success status code.
+        /// The response returned from the service.
+        ///
+        public virtual async Task ReplaceNodeUserAsync(string poolId, string nodeId, string userName, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset?
ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + Argument.AssertNotNullOrEmpty(userName, nameof(userName)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.ReplaceNodeUser"); + scope.Start(); + try + { + using HttpMessage message = CreateReplaceNodeUserRequest(poolId, nodeId, userName, content, timeOutInSeconds, ocpdate, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Updates the password and expiration time of a user Account on the specified Compute Node. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool that contains the Compute Node. + /// The ID of the machine on which you want to update a user Account. + /// The name of the user Account to update. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response ReplaceNodeUser(string poolId, string nodeId, string userName, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + Argument.AssertNotNullOrEmpty(userName, nameof(userName)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.ReplaceNodeUser"); + scope.Start(); + try + { + using HttpMessage message = CreateReplaceNodeUserRequest(poolId, nodeId, userName, content, timeOutInSeconds, ocpdate, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Gets information about the specified Compute Node. + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node that you want to get information about. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// An OData $select clause. + /// The cancellation token to use. + /// or is null. + /// or is an empty string, and was expected to be non-empty. 
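// Illustrative usage sketch (editor's example): fetching a node with the
// convenience overload that follows, optionally trimming the payload with an
// OData $select clause. The `client` variable and the `State` property access
// are assumptions.
//
//   Response<BatchNode> node = await client.GetNodeAsync(
//       "pool-1", "node-1", select: new[] { "id", "state" });
//   Console.WriteLine(node.Value.State);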
+ /// + public virtual async Task> GetNodeAsync(string poolId, string nodeId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable select = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await GetNodeAsync(poolId, nodeId, timeOutInSeconds, ocpdate, select, context).ConfigureAwait(false); + return Response.FromValue(BatchNode.FromResponse(response), response); + } + + /// Gets information about the specified Compute Node. + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node that you want to get information about. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// An OData $select clause. + /// The cancellation token to use. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// + public virtual Response GetNode(string poolId, string nodeId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable select = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + + RequestContext context = FromCancellationToken(cancellationToken); + Response response = GetNode(poolId, nodeId, timeOutInSeconds, ocpdate, select, context); + return Response.FromValue(BatchNode.FromResponse(response), response); + } + + /// + /// [Protocol Method] Gets information about the specified Compute Node. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node that you want to get information about. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// An OData $select clause. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task GetNodeAsync(string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset? 
ocpdate, IEnumerable select, RequestContext context) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetNode"); + scope.Start(); + try + { + using HttpMessage message = CreateGetNodeRequest(poolId, nodeId, timeOutInSeconds, ocpdate, select, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Gets information about the specified Compute Node. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node that you want to get information about. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// An OData $select clause. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response GetNode(string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, RequestContext context) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetNode"); + scope.Start(); + try + { + using HttpMessage message = CreateGetNodeRequest(poolId, nodeId, timeOutInSeconds, ocpdate, select, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Restarts the specified Compute Node. + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node that you want to restart. + /// The options to use for rebooting the Compute Node. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// You can restart a Compute Node only if it is in an idle or running state. + /// + public virtual async Task RebootNodeAsync(string poolId, string nodeId, BatchNodeRebootContent parameters = null, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + + using RequestContent content = parameters?.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await RebootNodeAsync(poolId, nodeId, content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + return response; + } + + /// Restarts the specified Compute Node. + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node that you want to restart. + /// The options to use for rebooting the Compute Node. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// You can restart a Compute Node only if it is in an idle or running state. + /// + public virtual Response RebootNode(string poolId, string nodeId, BatchNodeRebootContent parameters = null, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + + using RequestContent content = parameters?.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = RebootNode(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + return response; + } + + /// + /// [Protocol Method] Restarts the specified Compute Node. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node that you want to restart. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task RebootNodeAsync(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.RebootNode"); + scope.Start(); + try + { + using HttpMessage message = CreateRebootNodeRequest(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Restarts the specified Compute Node. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node that you want to restart. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response RebootNode(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.RebootNode"); + scope.Start(); + try + { + using HttpMessage message = CreateRebootNodeRequest(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Disables Task scheduling on the specified Compute Node. + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node on which you want to disable Task scheduling. + /// The options to use for disabling scheduling on the Compute Node. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// + /// You can disable Task scheduling on a Compute Node only if its current + /// scheduling state is enabled. + /// + /// + public virtual async Task DisableNodeSchedulingAsync(string poolId, string nodeId, BatchNodeDisableSchedulingContent parameters = null, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + + using RequestContent content = parameters?.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await DisableNodeSchedulingAsync(poolId, nodeId, content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + return response; + } + + /// Disables Task scheduling on the specified Compute Node. + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node on which you want to disable Task scheduling. + /// The options to use for disabling scheduling on the Compute Node. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// + /// You can disable Task scheduling on a Compute Node only if its current + /// scheduling state is enabled. + /// + /// + public virtual Response DisableNodeScheduling(string poolId, string nodeId, BatchNodeDisableSchedulingContent parameters = null, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + + using RequestContent content = parameters?.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = DisableNodeScheduling(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + return response; + } + + /// + /// [Protocol Method] Disables Task scheduling on the specified Compute Node. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node on which you want to disable Task scheduling. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task DisableNodeSchedulingAsync(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.DisableNodeScheduling"); + scope.Start(); + try + { + using HttpMessage message = CreateDisableNodeSchedulingRequest(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Disables Task scheduling on the specified Compute Node. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node on which you want to disable Task scheduling. + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response DisableNodeScheduling(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.DisableNodeScheduling"); + scope.Start(); + try + { + using HttpMessage message = CreateDisableNodeSchedulingRequest(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Enables Task scheduling on the specified Compute Node. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node on which you want to enable Task scheduling. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. 
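// Illustrative usage sketch (editor's example): draining and re-enabling Task
// scheduling on a node with the methods in this section. Only signatures shown
// above are used; `client` and the IDs are hypothetical.
//
//   // Stop new Tasks from being scheduled (the options parameter may be omitted).
//   await client.DisableNodeSchedulingAsync("pool-1", "node-1");
//   // ... perform maintenance, e.g. await client.RebootNodeAsync("pool-1", "node-1"); ...
//   // Resume scheduling; this protocol method has no separate convenience overload.
//   await client.EnableNodeSchedulingAsync("pool-1", "node-1");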
+ /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task EnableNodeSchedulingAsync(string poolId, string nodeId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.EnableNodeScheduling"); + scope.Start(); + try + { + using HttpMessage message = CreateEnableNodeSchedulingRequest(poolId, nodeId, timeOutInSeconds, ocpdate, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Enables Task scheduling on the specified Compute Node. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node on which you want to enable Task scheduling. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response EnableNodeScheduling(string poolId, string nodeId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.EnableNodeScheduling"); + scope.Start(); + try + { + using HttpMessage message = CreateEnableNodeSchedulingRequest(poolId, nodeId, timeOutInSeconds, ocpdate, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Gets the settings required for remote login to a Compute Node. + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node for which to obtain the remote login settings. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// + /// Before you can remotely login to a Compute Node using the remote login + /// settings, you must create a user Account on the Compute Node. This API can be + /// invoked only on Pools created with the virtual machine configuration property. 
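// Illustrative usage sketch (editor's example): reading the remote login
// endpoint for a node. The RemoteLoginIPAddress/RemoteLoginPort property names
// are assumptions about the BatchNodeRemoteLoginSettings model.
//
//   Response<BatchNodeRemoteLoginSettings> login =
//       await client.GetNodeRemoteLoginSettingsAsync("pool-1", "node-1");
//   Console.WriteLine($"{login.Value.RemoteLoginIPAddress}:{login.Value.RemoteLoginPort}");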
+ /// + /// + public virtual async Task> GetNodeRemoteLoginSettingsAsync(string poolId, string nodeId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await GetNodeRemoteLoginSettingsAsync(poolId, nodeId, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + return Response.FromValue(BatchNodeRemoteLoginSettings.FromResponse(response), response); + } + + /// Gets the settings required for remote login to a Compute Node. + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node for which to obtain the remote login settings. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// + /// Before you can remotely login to a Compute Node using the remote login + /// settings, you must create a user Account on the Compute Node. This API can be + /// invoked only on Pools created with the virtual machine configuration property. + /// + /// + public virtual Response GetNodeRemoteLoginSettings(string poolId, string nodeId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + + RequestContext context = FromCancellationToken(cancellationToken); + Response response = GetNodeRemoteLoginSettings(poolId, nodeId, timeOutInSeconds, ocpdate, context); + return Response.FromValue(BatchNodeRemoteLoginSettings.FromResponse(response), response); + } + + /// + /// [Protocol Method] Gets the settings required for remote login to a Compute Node. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node for which to obtain the remote login settings. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task GetNodeRemoteLoginSettingsAsync(string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset? 
ocpdate, RequestContext context) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetNodeRemoteLoginSettings"); + scope.Start(); + try + { + using HttpMessage message = CreateGetNodeRemoteLoginSettingsRequest(poolId, nodeId, timeOutInSeconds, ocpdate, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Gets the settings required for remote login to a Compute Node. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node for which to obtain the remote login settings. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response GetNodeRemoteLoginSettings(string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetNodeRemoteLoginSettings"); + scope.Start(); + try + { + using HttpMessage message = CreateGetNodeRemoteLoginSettingsRequest(poolId, nodeId, timeOutInSeconds, ocpdate, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// Upload Azure Batch service log files from the specified Compute Node to Azure + /// Blob Storage. + /// + /// The ID of the Pool that contains the Compute Node. + /// + /// The ID of the Compute Node for which you want to get the Remote Desktop + /// Protocol file. + /// + /// The Azure Batch service log files upload options. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + /// + /// This is for gathering Azure Batch service log files in an automated fashion + /// from Compute Nodes if you are experiencing an error and wish to escalate to + /// Azure support. The Azure Batch service log files should be shared with Azure + /// support to aid in debugging issues with the Batch service. 
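// Illustrative usage sketch (editor's example): uploading Batch service logs for
// a support case. The UploadBatchServiceLogsContent constructor arguments
// (container SAS URL, start time) and the NumberOfFilesUploaded property are
// assumptions about the model; the SAS URL is a placeholder.
//
//   var logsContent = new UploadBatchServiceLogsContent(
//       "https://<account>.blob.core.windows.net/<container>?<sas>",
//       DateTimeOffset.UtcNow.AddHours(-2));
//   Response<UploadBatchServiceLogsResult> uploaded =
//       await client.UploadNodeLogsAsync("pool-1", "node-1", logsContent);
//   Console.WriteLine(uploaded.Value.NumberOfFilesUploaded);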
+ /// + /// + public virtual async Task> UploadNodeLogsAsync(string poolId, string nodeId, UploadBatchServiceLogsContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + Argument.AssertNotNull(content, nameof(content)); + + using RequestContent content0 = content.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await UploadNodeLogsAsync(poolId, nodeId, content0, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + return Response.FromValue(UploadBatchServiceLogsResult.FromResponse(response), response); + } + + /// + /// Upload Azure Batch service log files from the specified Compute Node to Azure + /// Blob Storage. + /// + /// The ID of the Pool that contains the Compute Node. + /// + /// The ID of the Compute Node for which you want to get the Remote Desktop + /// Protocol file. + /// + /// The Azure Batch service log files upload options. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The cancellation token to use. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + /// + /// This is for gathering Azure Batch service log files in an automated fashion + /// from Compute Nodes if you are experiencing an error and wish to escalate to + /// Azure support. The Azure Batch service log files should be shared with Azure + /// support to aid in debugging issues with the Batch service. + /// + /// + public virtual Response UploadNodeLogs(string poolId, string nodeId, UploadBatchServiceLogsContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + Argument.AssertNotNull(content, nameof(content)); + + using RequestContent content0 = content.ToRequestContent(); + RequestContext context = FromCancellationToken(cancellationToken); + Response response = UploadNodeLogs(poolId, nodeId, content0, timeOutInSeconds, ocpdate, context); + return Response.FromValue(UploadBatchServiceLogsResult.FromResponse(response), response); + } + + /// + /// [Protocol Method] Upload Azure Batch service log files from the specified Compute Node to Azure + /// Blob Storage. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool that contains the Compute Node. + /// + /// The ID of the Compute Node for which you want to get the Remote Desktop + /// Protocol file. + /// + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. 
Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task UploadNodeLogsAsync(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.UploadNodeLogs"); + scope.Start(); + try + { + using HttpMessage message = CreateUploadNodeLogsRequest(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Upload Azure Batch service log files from the specified Compute Node to Azure + /// Blob Storage. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool that contains the Compute Node. + /// + /// The ID of the Compute Node for which you want to get the Remote Desktop + /// Protocol file. + /// + /// The content to send as the body of the request. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response UploadNodeLogs(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + Argument.AssertNotNull(content, nameof(content)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.UploadNodeLogs"); + scope.Start(); + try + { + using HttpMessage message = CreateUploadNodeLogsRequest(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Gets information about the specified Compute Node Extension. + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node that contains the extensions. + /// The name of the Compute Node Extension that you want to get information about. + /// The maximum time that the server can spend processing the request, in seconds. 
The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// An OData $select clause. + /// The cancellation token to use. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// + public virtual async Task> GetNodeExtensionAsync(string poolId, string nodeId, string extensionName, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable select = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + Argument.AssertNotNullOrEmpty(extensionName, nameof(extensionName)); + + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await GetNodeExtensionAsync(poolId, nodeId, extensionName, timeOutInSeconds, ocpdate, select, context).ConfigureAwait(false); + return Response.FromValue(BatchNodeVMExtension.FromResponse(response), response); + } + + /// Gets information about the specified Compute Node Extension. + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node that contains the extensions. + /// The name of the Compute Node Extension that you want to get information about. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// An OData $select clause. + /// The cancellation token to use. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// + public virtual Response GetNodeExtension(string poolId, string nodeId, string extensionName, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable select = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + Argument.AssertNotNullOrEmpty(extensionName, nameof(extensionName)); + + RequestContext context = FromCancellationToken(cancellationToken); + Response response = GetNodeExtension(poolId, nodeId, extensionName, timeOutInSeconds, ocpdate, select, context); + return Response.FromValue(BatchNodeVMExtension.FromResponse(response), response); + } + + /// + /// [Protocol Method] Gets information about the specified Compute Node Extension. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node that contains the extensions. + /// The name of the Compute Node Extension that you want to get information about. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. 
Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// An OData $select clause. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task GetNodeExtensionAsync(string poolId, string nodeId, string extensionName, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, RequestContext context) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + Argument.AssertNotNullOrEmpty(extensionName, nameof(extensionName)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetNodeExtension"); + scope.Start(); + try + { + using HttpMessage message = CreateGetNodeExtensionRequest(poolId, nodeId, extensionName, timeOutInSeconds, ocpdate, select, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Gets information about the specified Compute Node Extension. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node that contains the extensions. + /// The name of the Compute Node Extension that you want to get information about. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// An OData $select clause. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response GetNodeExtension(string poolId, string nodeId, string extensionName, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, RequestContext context) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + Argument.AssertNotNullOrEmpty(extensionName, nameof(extensionName)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetNodeExtension"); + scope.Start(); + try + { + using HttpMessage message = CreateGetNodeExtensionRequest(poolId, nodeId, extensionName, timeOutInSeconds, ocpdate, select, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Deletes the specified file from the Compute Node. 
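// Illustrative usage sketch (editor's example) for the extension-lookup methods
// above: retrieving a VM extension installed on a node. `client`, the IDs, and
// the extension name are hypothetical.
//
//   Response<BatchNodeVMExtension> extension = await client.GetNodeExtensionAsync(
//       "pool-1", "node-1", "CustomExtension100");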
+ /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node. + /// The path to the file or directory. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// Whether to delete children of a directory. If the filePath parameter represents + /// a directory instead of a file, you can set recursive to true to delete the + /// directory and all of the files and subdirectories in it. If recursive is false + /// then the directory must be empty or deletion will fail. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task DeleteNodeFileAsync(string poolId, string nodeId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, bool? recursive = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteNodeFile"); + scope.Start(); + try + { + using HttpMessage message = CreateDeleteNodeFileRequest(poolId, nodeId, filePath, timeOutInSeconds, ocpdate, recursive, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Deletes the specified file from the Compute Node. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node. + /// The path to the file or directory. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// Whether to delete children of a directory. If the filePath parameter represents + /// a directory instead of a file, you can set recursive to true to delete the + /// directory and all of the files and subdirectories in it. If recursive is false + /// then the directory must be empty or deletion will fail. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. 
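// Usage sketch (hypothetical file path): deleting a single task output file with
// DeleteNodeFileAsync. Per the parameter docs above, recursive: true is only
// needed when filePath names a directory whose contents should also be removed;
// deleting a non-empty directory with recursive: false fails.
Response deleted = await client.DeleteNodeFileAsync("pool1", "node1", "workitems/job-1/task-1/wd/output.txt", recursive: false);
Console.WriteLine(deleted.Status);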
+ /// The response returned from the service. + /// + public virtual Response DeleteNodeFile(string poolId, string nodeId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, bool? recursive = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteNodeFile"); + scope.Start(); + try + { + using HttpMessage message = CreateDeleteNodeFileRequest(poolId, nodeId, filePath, timeOutInSeconds, ocpdate, recursive, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Returns the content of the specified Compute Node file. + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node. + /// The path to the file or directory. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The byte range to be retrieved. The default is to retrieve the entire file. The + /// format is bytes=startRange-endRange. + /// + /// The content to send as the request conditions of the request. + /// The cancellation token to use. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// + public virtual async Task> GetNodeFileAsync(string poolId, string nodeId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, string ocpRange = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); + + RequestContext context = FromCancellationToken(cancellationToken); + Response response = await GetNodeFileAsync(poolId, nodeId, filePath, timeOutInSeconds, ocpdate, ocpRange, requestConditions, context).ConfigureAwait(false); + return Response.FromValue(response.Content, response); + } + + /// Returns the content of the specified Compute Node file. + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node. + /// The path to the file or directory. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The byte range to be retrieved. The default is to retrieve the entire file. The + /// format is bytes=startRange-endRange. + /// + /// The content to send as the request conditions of the request. + /// The cancellation token to use. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// + public virtual Response GetNodeFile(string poolId, string nodeId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, string ocpRange = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); + + RequestContext context = FromCancellationToken(cancellationToken); + Response response = GetNodeFile(poolId, nodeId, filePath, timeOutInSeconds, ocpdate, ocpRange, requestConditions, context); + return Response.FromValue(response.Content, response); + } + + /// + /// [Protocol Method] Returns the content of the specified Compute Node file. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node. + /// The path to the file or directory. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The byte range to be retrieved. The default is to retrieve the entire file. The + /// format is bytes=startRange-endRange. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual async Task GetNodeFileAsync(string poolId, string nodeId, string filePath, int? timeOutInSeconds, DateTimeOffset? ocpdate, string ocpRange, RequestConditions requestConditions, RequestContext context) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); + + if (requestConditions?.IfMatch is not null) + { + throw new ArgumentNullException(nameof(requestConditions), "Service does not support the If-Match header for this operation."); + } + if (requestConditions?.IfNoneMatch is not null) + { + throw new ArgumentNullException(nameof(requestConditions), "Service does not support the If-None-Match header for this operation."); + } + + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetNodeFile"); + scope.Start(); + try + { + using HttpMessage message = CreateGetNodeFileRequest(poolId, nodeId, filePath, timeOutInSeconds, ocpdate, ocpRange, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// + /// [Protocol Method] Returns the content of the specified Compute Node file. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool that contains the Compute Node. 
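// Usage sketch: fetching only the first KiB of a node file via the convenience
// overload; ocpRange uses the bytes=startRange-endRange form documented above.
// Note these methods reject RequestConditions with IfMatch/IfNoneMatch set, so
// leave requestConditions null unless using the supported conditions.
Response<BinaryData> chunk = await client.GetNodeFileAsync("pool1", "node1", "workitems/job-1/task-1/wd/output.txt", ocpRange: "bytes=0-1023");
Console.WriteLine(chunk.Value.ToString());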
+ /// The ID of the Compute Node. + /// The path to the file or directory. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The byte range to be retrieved. The default is to retrieve the entire file. The + /// format is bytes=startRange-endRange. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + /// + public virtual Response GetNodeFile(string poolId, string nodeId, string filePath, int? timeOutInSeconds, DateTimeOffset? ocpdate, string ocpRange, RequestConditions requestConditions, RequestContext context) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); + + if (requestConditions?.IfMatch is not null) + { + throw new ArgumentNullException(nameof(requestConditions), "Service does not support the If-Match header for this operation."); + } + if (requestConditions?.IfNoneMatch is not null) + { + throw new ArgumentNullException(nameof(requestConditions), "Service does not support the If-None-Match header for this operation."); + } + + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetNodeFile"); + scope.Start(); + try + { + using HttpMessage message = CreateGetNodeFileRequest(poolId, nodeId, filePath, timeOutInSeconds, ocpdate, ocpRange, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Gets the properties of the specified Compute Node file. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node. + /// The path to the file or directory. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + internal virtual async Task GetNodeFilePropertiesInternalAsync(string poolId, string nodeId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); + + if (requestConditions?.IfMatch is not null) + { + throw new ArgumentNullException(nameof(requestConditions), "Service does not support the If-Match header for this operation."); + } + if (requestConditions?.IfNoneMatch is not null) + { + throw new ArgumentNullException(nameof(requestConditions), "Service does not support the If-None-Match header for this operation."); + } + + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetNodeFilePropertiesInternal"); + scope.Start(); + try + { + using HttpMessage message = CreateGetNodeFilePropertiesInternalRequest(poolId, nodeId, filePath, timeOutInSeconds, ocpdate, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Gets the properties of the specified Compute Node file. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node. + /// The path to the file or directory. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// , or is null. + /// , or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + internal virtual Response GetNodeFilePropertiesInternal(string poolId, string nodeId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); + + if (requestConditions?.IfMatch is not null) + { + throw new ArgumentNullException(nameof(requestConditions), "Service does not support the If-Match header for this operation."); + } + if (requestConditions?.IfNoneMatch is not null) + { + throw new ArgumentNullException(nameof(requestConditions), "Service does not support the If-None-Match header for this operation."); + } + + using var scope = ClientDiagnostics.CreateScope("BatchClient.GetNodeFilePropertiesInternal"); + scope.Start(); + try + { + using HttpMessage message = CreateGetNodeFilePropertiesInternalRequest(poolId, nodeId, filePath, timeOutInSeconds, ocpdate, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + /// Lists all of the applications available in the specified Account. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead. + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// The cancellation token to use. + /// + /// This operation returns only Applications and versions that are available for + /// use on Compute Nodes; that is, that can be used in a Package reference. For + /// administrator information about applications and versions that are not yet + /// available to Compute Nodes, use the Azure portal or the Azure Resource Manager + /// API. + /// + /// + public virtual AsyncPageable<BatchApplication> GetApplicationsAsync(int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, CancellationToken cancellationToken = default) + { + RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetApplicationsRequest(timeOutInSeconds, ocpdate, maxresults, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetApplicationsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, context); + return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchApplication.DeserializeBatchApplication(e), ClientDiagnostics, _pipeline, "BatchClient.GetApplications", "value", "odata.nextLink", context); + }
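// Usage sketch: AsyncPageable follows the odata.nextLink continuation token
// transparently; maxresults caps items per response page (1000 max), not the
// total. The Id and Versions property names on BatchApplication are assumptions
// drawn from the application model, not from this diff.
await foreach (BatchApplication application in client.GetApplicationsAsync(maxresults: 100))
{
    Console.WriteLine($"{application.Id}: [{string.Join(", ", application.Versions)}]");
}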
+ + /// Lists all of the applications available in the specified Account. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead. + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// The cancellation token to use. + /// + /// This operation returns only Applications and versions that are available for + /// use on Compute Nodes; that is, that can be used in a Package reference. For + /// administrator information about applications and versions that are not yet + /// available to Compute Nodes, use the Azure portal or the Azure Resource Manager + /// API. + /// + /// + public virtual Pageable<BatchApplication> GetApplications(int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, CancellationToken cancellationToken = default) + { + RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetApplicationsRequest(timeOutInSeconds, ocpdate, maxresults, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetApplicationsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, context); + return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BatchApplication.DeserializeBatchApplication(e), ClientDiagnostics, _pipeline, "BatchClient.GetApplications", "value", "odata.nextLink", context); + } + + /// + /// [Protocol Method] Lists all of the applications available in the specified Account. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead. + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. + /// The AsyncPageable<BinaryData> from the service containing a list of BinaryData objects. Details of the body schema for each item in the collection are in the Remarks section below. + /// + public virtual AsyncPageable<BinaryData> GetApplicationsAsync(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, RequestContext context) + { + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetApplicationsRequest(timeOutInSeconds, ocpdate, maxresults, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetApplicationsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, context); + return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetApplications", "value", "odata.nextLink", context); + } + + /// + /// [Protocol Method] Lists all of the applications available in the specified Account. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds.
If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. + /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. + /// + public virtual Pageable GetApplications(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, RequestContext context) + { + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetApplicationsRequest(timeOutInSeconds, ocpdate, maxresults, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetApplicationsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, context); + return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetApplications", "value", "odata.nextLink", context); + } + + /// + /// Lists the usage metrics, aggregated by Pool across individual time intervals, + /// for the specified Account. + /// + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// The earliest time from which to include metrics. This must be at least two and + /// a half hours before the current time. If not specified this defaults to the + /// start time of the last aggregation interval currently available. + /// + /// + /// The latest time from which to include metrics. This must be at least two hours + /// before the current time. If not specified this defaults to the end time of the + /// last aggregation interval currently available. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. + /// + /// The cancellation token to use. + /// + /// If you do not specify a $filter clause including a poolId, the response + /// includes all Pools that existed in the Account in the time range of the + /// returned aggregation intervals. If you do not specify a $filter clause + /// including a startTime or endTime these filters default to the start and end + /// times of the last aggregation interval currently available; that is, only the + /// last aggregation interval is returned. + /// + /// + public virtual AsyncPageable GetPoolUsageMetricsAsync(int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, DateTimeOffset? starttime = null, DateTimeOffset? endtime = null, string filter = null, CancellationToken cancellationToken = default) + { + RequestContext context = cancellationToken.CanBeCanceled ? 
new RequestContext { CancellationToken = cancellationToken } : null; + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolUsageMetricsRequest(timeOutInSeconds, ocpdate, maxresults, starttime, endtime, filter, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetPoolUsageMetricsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, starttime, endtime, filter, context); + return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchPoolUsageMetrics.DeserializeBatchPoolUsageMetrics(e), ClientDiagnostics, _pipeline, "BatchClient.GetPoolUsageMetrics", "value", "odata.nextLink", context); + } + + /// + /// Lists the usage metrics, aggregated by Pool across individual time intervals, + /// for the specified Account. + /// + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// The earliest time from which to include metrics. This must be at least two and + /// a half hours before the current time. If not specified this defaults to the + /// start time of the last aggregation interval currently available. + /// + /// + /// The latest time from which to include metrics. This must be at least two hours + /// before the current time. If not specified this defaults to the end time of the + /// last aggregation interval currently available. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. + /// + /// The cancellation token to use. + /// + /// If you do not specify a $filter clause including a poolId, the response + /// includes all Pools that existed in the Account in the time range of the + /// returned aggregation intervals. If you do not specify a $filter clause + /// including a startTime or endTime these filters default to the start and end + /// times of the last aggregation interval currently available; that is, only the + /// last aggregation interval is returned. + /// + /// + public virtual Pageable GetPoolUsageMetrics(int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, DateTimeOffset? starttime = null, DateTimeOffset? endtime = null, string filter = null, CancellationToken cancellationToken = default) + { + RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolUsageMetricsRequest(timeOutInSeconds, ocpdate, maxresults, starttime, endtime, filter, context); + HttpMessage NextPageRequest(int? 
pageSizeHint, string nextLink) => CreateGetPoolUsageMetricsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, starttime, endtime, filter, context); + return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BatchPoolUsageMetrics.DeserializeBatchPoolUsageMetrics(e), ClientDiagnostics, _pipeline, "BatchClient.GetPoolUsageMetrics", "value", "odata.nextLink", context); + } + + /// + /// [Protocol Method] Lists the usage metrics, aggregated by Pool across individual time intervals, + /// for the specified Account. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// The earliest time from which to include metrics. This must be at least two and + /// a half hours before the current time. If not specified this defaults to the + /// start time of the last aggregation interval currently available. + /// + /// + /// The latest time from which to include metrics. This must be at least two hours + /// before the current time. If not specified this defaults to the end time of the + /// last aggregation interval currently available. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. + /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. + /// + public virtual AsyncPageable GetPoolUsageMetricsAsync(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, DateTimeOffset? starttime, DateTimeOffset? endtime, string filter, RequestContext context) + { + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolUsageMetricsRequest(timeOutInSeconds, ocpdate, maxresults, starttime, endtime, filter, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetPoolUsageMetricsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, starttime, endtime, filter, context); + return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetPoolUsageMetrics", "value", "odata.nextLink", context); + } + + /// + /// [Protocol Method] Lists the usage metrics, aggregated by Pool across individual time intervals, + /// for the specified Account. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. 
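// Usage sketch: pulling one pool's usage for the last day. Per the parameter
// docs above, endtime must be at least two hours in the past. PoolId and
// TotalCoreHours are assumed property names on BatchPoolUsageMetrics.
DateTimeOffset end = DateTimeOffset.UtcNow.AddHours(-3);
await foreach (BatchPoolUsageMetrics metrics in client.GetPoolUsageMetricsAsync(starttime: end.AddDays(-1), endtime: end, filter: "poolId eq 'pool1'"))
{
    Console.WriteLine($"{metrics.PoolId}: {metrics.TotalCoreHours} core-hours");
}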
+ /// + /// + /// + /// + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// The earliest time from which to include metrics. This must be at least two and + /// a half hours before the current time. If not specified this defaults to the + /// start time of the last aggregation interval currently available. + /// + /// + /// The latest time from which to include metrics. This must be at least two hours + /// before the current time. If not specified this defaults to the end time of the + /// last aggregation interval currently available. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. + /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. + /// + public virtual Pageable GetPoolUsageMetrics(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, DateTimeOffset? starttime, DateTimeOffset? endtime, string filter, RequestContext context) + { + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolUsageMetricsRequest(timeOutInSeconds, ocpdate, maxresults, starttime, endtime, filter, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetPoolUsageMetricsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, starttime, endtime, filter, context); + return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetPoolUsageMetrics", "value", "odata.nextLink", context); + } + + /// Lists all of the Pools in the specified Account. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-pools. + /// + /// An OData $select clause. + /// An OData $expand clause. + /// The cancellation token to use. + /// + public virtual AsyncPageable GetPoolsAsync(int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, IEnumerable select = null, IEnumerable expand = null, CancellationToken cancellationToken = default) + { + RequestContext context = cancellationToken.CanBeCanceled ? 
new RequestContext { CancellationToken = cancellationToken } : null; + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolsRequest(timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetPoolsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchPool.DeserializeBatchPool(e), ClientDiagnostics, _pipeline, "BatchClient.GetPools", "value", "odata.nextLink", context); + } + + /// Lists all of the Pools in the specified Account. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-pools. + /// + /// An OData $select clause. + /// An OData $expand clause. + /// The cancellation token to use. + /// + public virtual Pageable GetPools(int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, IEnumerable select = null, IEnumerable expand = null, CancellationToken cancellationToken = default) + { + RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolsRequest(timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetPoolsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BatchPool.DeserializeBatchPool(e), ClientDiagnostics, _pipeline, "BatchClient.GetPools", "value", "odata.nextLink", context); + } + + /// + /// [Protocol Method] Lists all of the Pools in the specified Account. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-pools. + /// + /// An OData $select clause. + /// An OData $expand clause. 
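// Usage sketch: the select parameter is sent as an OData $select projection, so
// the service returns only the requested fields of each BatchPool; the filter
// expression shown is an assumed example of a list-pools clause.
await foreach (BatchPool pool in client.GetPoolsAsync(select: new[] { "id", "state" }, filter: "state eq 'active'"))
{
    Console.WriteLine($"{pool.Id}: {pool.State}");
}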
+ /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. + /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. + /// + public virtual AsyncPageable GetPoolsAsync(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) + { + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolsRequest(timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetPoolsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetPools", "value", "odata.nextLink", context); + } + + /// + /// [Protocol Method] Lists all of the Pools in the specified Account. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-pools. + /// + /// An OData $select clause. + /// An OData $expand clause. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. + /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. + /// + public virtual Pageable GetPools(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) + { + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolsRequest(timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetPoolsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetPools", "value", "odata.nextLink", context); + } + + /// Lists all Virtual Machine Images supported by the Azure Batch service. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. 
Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. + /// + /// The cancellation token to use. + /// + public virtual AsyncPageable GetSupportedImagesAsync(int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, CancellationToken cancellationToken = default) + { + RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetSupportedImagesRequest(timeOutInSeconds, ocpdate, maxresults, filter, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetSupportedImagesNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, context); + return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchSupportedImage.DeserializeBatchSupportedImage(e), ClientDiagnostics, _pipeline, "BatchClient.GetSupportedImages", "value", "odata.nextLink", context); + } + + /// Lists all Virtual Machine Images supported by the Azure Batch service. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. + /// + /// The cancellation token to use. + /// + public virtual Pageable GetSupportedImages(int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, CancellationToken cancellationToken = default) + { + RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetSupportedImagesRequest(timeOutInSeconds, ocpdate, maxresults, filter, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetSupportedImagesNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, context); + return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BatchSupportedImage.DeserializeBatchSupportedImage(e), ClientDiagnostics, _pipeline, "BatchClient.GetSupportedImages", "value", "odata.nextLink", context); + } + + /// + /// [Protocol Method] Lists all Virtual Machine Images supported by the Azure Batch service. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. 
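// Usage sketch: narrowing the supported-image list with an OData filter; the
// verificationType clause is an assumption about supported filter fields, and
// NodeAgentSkuId/OsType are assumed property names on BatchSupportedImage.
await foreach (BatchSupportedImage image in client.GetSupportedImagesAsync(filter: "verificationType eq 'verified'"))
{
    Console.WriteLine($"{image.NodeAgentSkuId} ({image.OsType})");
}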
+ /// + /// + /// + /// + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. + /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. + /// + public virtual AsyncPageable GetSupportedImagesAsync(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, RequestContext context) + { + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetSupportedImagesRequest(timeOutInSeconds, ocpdate, maxresults, filter, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetSupportedImagesNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, context); + return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetSupportedImages", "value", "odata.nextLink", context); + } + + /// + /// [Protocol Method] Lists all Virtual Machine Images supported by the Azure Batch service. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. + /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. + /// + public virtual Pageable GetSupportedImages(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, RequestContext context) + { + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetSupportedImagesRequest(timeOutInSeconds, ocpdate, maxresults, filter, context); + HttpMessage NextPageRequest(int? 
pageSizeHint, string nextLink) => CreateGetSupportedImagesNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, context); + return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetSupportedImages", "value", "odata.nextLink", context); + } + + /// + /// Gets the number of Compute Nodes in each state, grouped by Pool. Note that the + /// numbers returned may not always be up to date. If you need exact node counts, + /// use a list query. + /// + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. + /// + /// The cancellation token to use. + /// + public virtual AsyncPageable GetPoolNodeCountsAsync(int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, CancellationToken cancellationToken = default) + { + RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolNodeCountsRequest(timeOutInSeconds, ocpdate, maxresults, filter, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetPoolNodeCountsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, context); + return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchPoolNodeCounts.DeserializeBatchPoolNodeCounts(e), ClientDiagnostics, _pipeline, "BatchClient.GetPoolNodeCounts", "value", "odata.nextLink", context); + } + + /// + /// Gets the number of Compute Nodes in each state, grouped by Pool. Note that the + /// numbers returned may not always be up to date. If you need exact node counts, + /// use a list query. + /// + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. + /// + /// The cancellation token to use. + /// + public virtual Pageable GetPoolNodeCounts(int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, CancellationToken cancellationToken = default) + { + RequestContext context = cancellationToken.CanBeCanceled ? 
new RequestContext { CancellationToken = cancellationToken } : null; + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolNodeCountsRequest(timeOutInSeconds, ocpdate, maxresults, filter, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetPoolNodeCountsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, context); + return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BatchPoolNodeCounts.DeserializeBatchPoolNodeCounts(e), ClientDiagnostics, _pipeline, "BatchClient.GetPoolNodeCounts", "value", "odata.nextLink", context); + } + + /// + /// [Protocol Method] Gets the number of Compute Nodes in each state, grouped by Pool. Note that the + /// numbers returned may not always be up to date. If you need exact node counts, + /// use a list query. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. + /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. + /// + public virtual AsyncPageable GetPoolNodeCountsAsync(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, RequestContext context) + { + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolNodeCountsRequest(timeOutInSeconds, ocpdate, maxresults, filter, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetPoolNodeCountsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, context); + return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetPoolNodeCounts", "value", "odata.nextLink", context); + } + + /// + /// [Protocol Method] Gets the number of Compute Nodes in each state, grouped by Pool. Note that the + /// numbers returned may not always be up to date. If you need exact node counts, + /// use a list query. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". 
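// Usage sketch: node counts are a point-in-time snapshot that may lag actual
// state (see the summary above), so prefer a list query when exact counts
// matter. Dedicated.Idle/Running are assumed property names on the node-counts model.
await foreach (BatchPoolNodeCounts counts in client.GetPoolNodeCountsAsync(filter: "poolId eq 'pool1'"))
{
    Console.WriteLine($"{counts.PoolId}: idle={counts.Dedicated.Idle}, running={counts.Dedicated.Running}");
}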
+ /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-support-images. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. + /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. + /// + public virtual Pageable GetPoolNodeCounts(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, RequestContext context) + { + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolNodeCountsRequest(timeOutInSeconds, ocpdate, maxresults, filter, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetPoolNodeCountsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, context); + return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetPoolNodeCounts", "value", "odata.nextLink", context); + } + + /// Lists all of the Jobs in the specified Account. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs. + /// + /// An OData $select clause. + /// An OData $expand clause. + /// The cancellation token to use. + /// + public virtual AsyncPageable GetJobsAsync(int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, IEnumerable select = null, IEnumerable expand = null, CancellationToken cancellationToken = default) + { + RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobsRequest(timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchJob.DeserializeBatchJob(e), ClientDiagnostics, _pipeline, "BatchClient.GetJobs", "value", "odata.nextLink", context); + } + + /// Lists all of the Jobs in the specified Account. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs. + /// + /// An OData $select clause. + /// An OData $expand clause. + /// The cancellation token to use. + /// + public virtual Pageable GetJobs(int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, IEnumerable select = null, IEnumerable expand = null, CancellationToken cancellationToken = default) + { + RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobsRequest(timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BatchJob.DeserializeBatchJob(e), ClientDiagnostics, _pipeline, "BatchClient.GetJobs", "value", "odata.nextLink", context); + } + + /// + /// [Protocol Method] Lists all of the Jobs in the specified Account. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs. + /// + /// An OData $select clause. + /// An OData $expand clause. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. + /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. + /// + public virtual AsyncPageable GetJobsAsync(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) + { + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobsRequest(timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? 
pageSizeHint, string nextLink) => CreateGetJobsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetJobs", "value", "odata.nextLink", context); + } + + /// + /// [Protocol Method] Lists all of the Jobs in the specified Account. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs. + /// + /// An OData $select clause. + /// An OData $expand clause. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. + /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. + /// + public virtual Pageable GetJobs(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) + { + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobsRequest(timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetJobs", "value", "odata.nextLink", context); + } + + /// Lists the Jobs that have been created under the specified Job Schedule. + /// The ID of the Job Schedule from which you want to get a list of Jobs. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. + /// + /// An OData $select clause. + /// An OData $expand clause. + /// The cancellation token to use. + /// is null. 
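// A sketch of calling the protocol overload above directly (assumes "client" is an
// authenticated BatchClient and System.Text.Json is in scope); each item is the raw
// JSON for one Job, surfaced as BinaryData:
//
//     await foreach (BinaryData item in client.GetJobsAsync(
//         null, null, null, "state eq 'active'", null, null, new RequestContext()))
//     {
//         using JsonDocument doc = JsonDocument.Parse(item.ToMemory());
//         Console.WriteLine(doc.RootElement.GetProperty("id").GetString());
//     }
//
// Prefer the convenience overloads above, which deserialize each page item into
// BatchJob for you.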
+        /// jobScheduleId is an empty string, and was expected to be non-empty.
+        ///
+        public virtual AsyncPageable<BatchJob> GetJobsFromSchedulesAsync(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, IEnumerable<string> select = null, IEnumerable<string> expand = null, CancellationToken cancellationToken = default)
+        {
+            Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId));
+
+            RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null;
+            HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobsFromSchedulesRequest(jobScheduleId, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context);
+            HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobsFromSchedulesNextPageRequest(nextLink, jobScheduleId, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context);
+            return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchJob.DeserializeBatchJob(e), ClientDiagnostics, _pipeline, "BatchClient.GetJobsFromSchedules", "value", "odata.nextLink", context);
+        }
+
+        /// Lists the Jobs that have been created under the specified Job Schedule.
+        /// The ID of the Job Schedule from which you want to get a list of Jobs.
+        /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.
+        ///
+        /// The time the request was issued. Client libraries typically set this to the
+        /// current system clock time; set it explicitly if you are calling the REST API
+        /// directly.
+        ///
+        ///
+        /// The maximum number of items to return in the response. A maximum of 1000
+        /// items can be returned.
+        ///
+        ///
+        /// An OData $filter clause. For more information on constructing this filter, see
+        /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule.
+        ///
+        /// An OData $select clause.
+        /// An OData $expand clause.
+        /// The cancellation token to use.
+        /// jobScheduleId is null.
+        /// jobScheduleId is an empty string, and was expected to be non-empty.
+        ///
+        public virtual Pageable<BatchJob> GetJobsFromSchedules(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, IEnumerable<string> select = null, IEnumerable<string> expand = null, CancellationToken cancellationToken = default)
+        {
+            Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId));
+
+            RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null;
+            HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobsFromSchedulesRequest(jobScheduleId, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context);
+            HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobsFromSchedulesNextPageRequest(nextLink, jobScheduleId, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context);
+            return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BatchJob.DeserializeBatchJob(e), ClientDiagnostics, _pipeline, "BatchClient.GetJobsFromSchedules", "value", "odata.nextLink", context);
+        }
+
+        ///
+        /// [Protocol Method] Lists the Jobs that have been created under the specified Job Schedule.
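// A brief usage sketch for the convenience overloads above (assumes an
// authenticated BatchClient named "client" and a hypothetical schedule ID):
//
//     await foreach (BatchJob job in client.GetJobsFromSchedulesAsync("weekly-schedule"))
//     {
//         Console.WriteLine($"{job.Id}: {job.State}");
//     }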
+ /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job Schedule from which you want to get a list of Jobs. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. + /// + /// An OData $select clause. + /// An OData $expand clause. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. + /// + public virtual AsyncPageable GetJobsFromSchedulesAsync(string jobScheduleId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) + { + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); + + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobsFromSchedulesRequest(jobScheduleId, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobsFromSchedulesNextPageRequest(nextLink, jobScheduleId, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetJobsFromSchedules", "value", "odata.nextLink", context); + } + + /// + /// [Protocol Method] Lists the Jobs that have been created under the specified Job Schedule. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job Schedule from which you want to get a list of Jobs. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. 
For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-jobs-in-a-job-schedule. + /// + /// An OData $select clause. + /// An OData $expand clause. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. + /// + public virtual Pageable GetJobsFromSchedules(string jobScheduleId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) + { + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); + + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobsFromSchedulesRequest(jobScheduleId, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobsFromSchedulesNextPageRequest(nextLink, jobScheduleId, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetJobsFromSchedules", "value", "odata.nextLink", context); + } + + /// + /// Lists the execution status of the Job Preparation and Job Release Task for the + /// specified Job across the Compute Nodes where the Job has run. + /// + /// The ID of the Job. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. + /// + /// An OData $select clause. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// + /// This API returns the Job Preparation and Job Release Task status on all Compute + /// Nodes that have run the Job Preparation or Job Release Task. This includes + /// Compute Nodes which have since been removed from the Pool. If this API is + /// invoked on a Job which has no Job Preparation or Job Release Task, the Batch + /// service returns HTTP status code 409 (Conflict) with an error code of + /// JobPreparationTaskNotSpecified. + /// + /// + public virtual AsyncPageable GetJobPreparationAndReleaseTaskStatusesAsync(string jobId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, IEnumerable select = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; + HttpMessage FirstPageRequest(int? 
pageSizeHint) => CreateGetJobPreparationAndReleaseTaskStatusesRequest(jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobPreparationAndReleaseTaskStatusesNextPageRequest(nextLink, jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchJobPreparationAndReleaseTaskStatus.DeserializeBatchJobPreparationAndReleaseTaskStatus(e), ClientDiagnostics, _pipeline, "BatchClient.GetJobPreparationAndReleaseTaskStatuses", "value", "odata.nextLink", context); + } + + /// + /// Lists the execution status of the Job Preparation and Job Release Task for the + /// specified Job across the Compute Nodes where the Job has run. + /// + /// The ID of the Job. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. + /// + /// An OData $select clause. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// + /// This API returns the Job Preparation and Job Release Task status on all Compute + /// Nodes that have run the Job Preparation or Job Release Task. This includes + /// Compute Nodes which have since been removed from the Pool. If this API is + /// invoked on a Job which has no Job Preparation or Job Release Task, the Batch + /// service returns HTTP status code 409 (Conflict) with an error code of + /// JobPreparationTaskNotSpecified. + /// + /// + public virtual Pageable GetJobPreparationAndReleaseTaskStatuses(string jobId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, IEnumerable select = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobPreparationAndReleaseTaskStatusesRequest(jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobPreparationAndReleaseTaskStatusesNextPageRequest(nextLink, jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BatchJobPreparationAndReleaseTaskStatus.DeserializeBatchJobPreparationAndReleaseTaskStatus(e), ClientDiagnostics, _pipeline, "BatchClient.GetJobPreparationAndReleaseTaskStatuses", "value", "odata.nextLink", context); + } + + /// + /// [Protocol Method] Lists the execution status of the Job Preparation and Job Release Task for the + /// specified Job across the Compute Nodes where the Job has run. 
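// A hedged sketch of consuming the convenience overloads above, including the 409
// (JobPreparationTaskNotSpecified) case called out in the remarks. It assumes
// "client" is an authenticated BatchClient; the NodeId property name is an
// assumption following the service schema:
//
//     try
//     {
//         await foreach (BatchJobPreparationAndReleaseTaskStatus status in
//             client.GetJobPreparationAndReleaseTaskStatusesAsync("job-1"))
//         {
//             Console.WriteLine(status.NodeId);
//         }
//     }
//     catch (RequestFailedException ex) when (ex.Status == 409)
//     {
//         // The Job has no Job Preparation or Job Release Task.
//     }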
+ /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. + /// + /// An OData $select clause. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. + /// + public virtual AsyncPageable GetJobPreparationAndReleaseTaskStatusesAsync(string jobId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, RequestContext context) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobPreparationAndReleaseTaskStatusesRequest(jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobPreparationAndReleaseTaskStatusesNextPageRequest(nextLink, jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetJobPreparationAndReleaseTaskStatuses", "value", "odata.nextLink", context); + } + + /// + /// [Protocol Method] Lists the execution status of the Job Preparation and Job Release Task for the + /// specified Job across the Compute Nodes where the Job has run. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-preparation-and-release-status. 
+ /// + /// An OData $select clause. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. + /// + public virtual Pageable GetJobPreparationAndReleaseTaskStatuses(string jobId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, RequestContext context) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobPreparationAndReleaseTaskStatusesRequest(jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobPreparationAndReleaseTaskStatusesNextPageRequest(nextLink, jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetJobPreparationAndReleaseTaskStatuses", "value", "odata.nextLink", context); + } + + /// Lists all of the Job Schedules in the specified Account. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. + /// + /// An OData $select clause. + /// An OData $expand clause. + /// The cancellation token to use. + /// + public virtual AsyncPageable GetJobSchedulesAsync(int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, IEnumerable select = null, IEnumerable expand = null, CancellationToken cancellationToken = default) + { + RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobSchedulesRequest(timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobSchedulesNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchJobSchedule.DeserializeBatchJobSchedule(e), ClientDiagnostics, _pipeline, "BatchClient.GetJobSchedules", "value", "odata.nextLink", context); + } + + /// Lists all of the Job Schedules in the specified Account. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. 
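// A usage sketch for the schedule-listing overloads above (assumes "client" is an
// authenticated BatchClient; the filter and select values are illustrative):
//
//     await foreach (BatchJobSchedule schedule in client.GetJobSchedulesAsync(
//         filter: "state eq 'active'", select: new[] { "id", "state" }))
//     {
//         Console.WriteLine(schedule.Id);
//     }
//
// Passing a $select projection trims each returned entity to the named fields.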
Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. + /// + /// An OData $select clause. + /// An OData $expand clause. + /// The cancellation token to use. + /// + public virtual Pageable GetJobSchedules(int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, IEnumerable select = null, IEnumerable expand = null, CancellationToken cancellationToken = default) + { + RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobSchedulesRequest(timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobSchedulesNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BatchJobSchedule.DeserializeBatchJobSchedule(e), ClientDiagnostics, _pipeline, "BatchClient.GetJobSchedules", "value", "odata.nextLink", context); + } + + /// + /// [Protocol Method] Lists all of the Job Schedules in the specified Account. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. + /// + /// An OData $select clause. + /// An OData $expand clause. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. + /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. + /// + public virtual AsyncPageable GetJobSchedulesAsync(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) + { + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobSchedulesRequest(timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? 
pageSizeHint, string nextLink) => CreateGetJobSchedulesNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetJobSchedules", "value", "odata.nextLink", context); + } + + /// + /// [Protocol Method] Lists all of the Job Schedules in the specified Account. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-job-schedules. + /// + /// An OData $select clause. + /// An OData $expand clause. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// Service returned a non-success status code. + /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. + /// + public virtual Pageable GetJobSchedules(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) + { + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobSchedulesRequest(timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobSchedulesNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetJobSchedules", "value", "odata.nextLink", context); + } + + /// Lists all of the Tasks that are associated with the specified Job. + /// The ID of the Job. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks. + /// + /// An OData $select clause. + /// An OData $expand clause. + /// The cancellation token to use. + /// is null. 
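// The $filter strings these methods accept follow the Batch OData dialect linked
// from the parameter docs above; a hedged example combining two clauses (property
// names and the datetime literal form are per that article):
//
//     string filter = "(state eq 'active') and (creationTime gt datetime'2024-01-01T00:00:00Z')";
//
// The properties that each list operation can filter on are enumerated in the
// odata-filters-in-batch article.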
+ /// is an empty string, and was expected to be non-empty. + /// + /// For multi-instance Tasks, information such as affinityId, executionInfo and + /// nodeInfo refer to the primary Task. Use the list subtasks API to retrieve + /// information about subtasks. + /// + /// + public virtual AsyncPageable GetTasksAsync(string jobId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, IEnumerable select = null, IEnumerable expand = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetTasksRequest(jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetTasksNextPageRequest(nextLink, jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchTask.DeserializeBatchTask(e), ClientDiagnostics, _pipeline, "BatchClient.GetTasks", "value", "odata.nextLink", context); + } + + /// Lists all of the Tasks that are associated with the specified Job. + /// The ID of the Job. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks. + /// + /// An OData $select clause. + /// An OData $expand clause. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// + /// For multi-instance Tasks, information such as affinityId, executionInfo and + /// nodeInfo refer to the primary Task. Use the list subtasks API to retrieve + /// information about subtasks. + /// + /// + public virtual Pageable GetTasks(string jobId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, IEnumerable select = null, IEnumerable expand = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetTasksRequest(jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? 
pageSizeHint, string nextLink) => CreateGetTasksNextPageRequest(nextLink, jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BatchTask.DeserializeBatchTask(e), ClientDiagnostics, _pipeline, "BatchClient.GetTasks", "value", "odata.nextLink", context); + } + + /// + /// [Protocol Method] Lists all of the Tasks that are associated with the specified Job. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks. + /// + /// An OData $select clause. + /// An OData $expand clause. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. + /// + public virtual AsyncPageable GetTasksAsync(string jobId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetTasksRequest(jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetTasksNextPageRequest(nextLink, jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetTasks", "value", "odata.nextLink", context); + } + + /// + /// [Protocol Method] Lists all of the Tasks that are associated with the specified Job. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. 
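// A sketch of the task-listing convenience overloads above (assumes "client" is an
// authenticated BatchClient and that a job named "job-1" exists):
//
//     await foreach (BatchTask task in client.GetTasksAsync(
//         "job-1", select: new[] { "id", "state", "executionInfo" }))
//     {
//         Console.WriteLine($"{task.Id}: {task.State}");
//     }
//
// For multi-instance tasks this reflects the primary task only; subtask detail
// comes from the subtask listing methods that follow.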
+ /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-tasks. + /// + /// An OData $select clause. + /// An OData $expand clause. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. + /// + public virtual Pageable GetTasks(string jobId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetTasksRequest(jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetTasksNextPageRequest(nextLink, jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetTasks", "value", "odata.nextLink", context); + } + + /// + /// Lists all of the subtasks that are associated with the specified multi-instance + /// Task. + /// + /// The ID of the Job. + /// The ID of the Task. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// An OData $select clause. + /// The cancellation token to use. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// If the Task is not a multi-instance Task then this returns an empty collection. + /// + public virtual AsyncPageable GetSubTasksAsync(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable select = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + + RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetSubTasksRequest(jobId, taskId, timeOutInSeconds, ocpdate, select, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetSubTasksNextPageRequest(nextLink, jobId, taskId, timeOutInSeconds, ocpdate, select, context); + return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchSubtask.DeserializeBatchSubtask(e), ClientDiagnostics, _pipeline, "BatchClient.GetSubTasks", "value", "odata.nextLink", context); + } + + /// + /// Lists all of the subtasks that are associated with the specified multi-instance + /// Task. + /// + /// The ID of the Job. + /// The ID of the Task. 
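// A usage sketch for the subtask listing above (assumes "client" is an
// authenticated BatchClient and "mi-task" is a multi-instance task in "job-1"):
//
//     await foreach (BatchSubtask subtask in client.GetSubTasksAsync("job-1", "mi-task"))
//     {
//         Console.WriteLine($"subtask {subtask.Id}: {subtask.State}");
//     }
//
// For a task that is not multi-instance, the sequence is simply empty.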
+        /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.
+        ///
+        /// The time the request was issued. Client libraries typically set this to the
+        /// current system clock time; set it explicitly if you are calling the REST API
+        /// directly.
+        ///
+        /// An OData $select clause.
+        /// The cancellation token to use.
+        /// jobId or taskId is null.
+        /// jobId or taskId is an empty string, and was expected to be non-empty.
+        /// If the Task is not a multi-instance Task then this returns an empty collection.
+        ///
+        public virtual Pageable<BatchSubtask> GetSubTasks(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable<string> select = null, CancellationToken cancellationToken = default)
+        {
+            Argument.AssertNotNullOrEmpty(jobId, nameof(jobId));
+            Argument.AssertNotNullOrEmpty(taskId, nameof(taskId));
+
+            RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null;
+            HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetSubTasksRequest(jobId, taskId, timeOutInSeconds, ocpdate, select, context);
+            HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetSubTasksNextPageRequest(nextLink, jobId, taskId, timeOutInSeconds, ocpdate, select, context);
+            return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BatchSubtask.DeserializeBatchSubtask(e), ClientDiagnostics, _pipeline, "BatchClient.GetSubTasks", "value", "odata.nextLink", context);
+        }
+
+        ///
+        /// [Protocol Method] Lists all of the subtasks that are associated with the specified multi-instance
+        /// Task.
+        ///
+        ///
+        ///
+        /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+        ///
+        ///
+        ///
+        ///
+        /// Please try the simpler convenience overload with strongly typed models first.
+        ///
+        ///
+        ///
+        ///
+        /// The ID of the Job.
+        /// The ID of the Task.
+        /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.
+        ///
+        /// The time the request was issued. Client libraries typically set this to the
+        /// current system clock time; set it explicitly if you are calling the REST API
+        /// directly.
+        ///
+        /// An OData $select clause.
+        /// The request context, which can override default behaviors of the client pipeline on a per-call basis.
+        /// jobId or taskId is null.
+        /// jobId or taskId is an empty string, and was expected to be non-empty.
+        /// Service returned a non-success status code.
+        /// The AsyncPageable<BinaryData> from the service containing a list of BinaryData objects. Details of the body schema for each item in the collection are in the Remarks section below.
+        ///
+        public virtual AsyncPageable<BinaryData> GetSubTasksAsync(string jobId, string taskId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable<string> select, RequestContext context)
+        {
+            Argument.AssertNotNullOrEmpty(jobId, nameof(jobId));
+            Argument.AssertNotNullOrEmpty(taskId, nameof(taskId));
+
+            HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetSubTasksRequest(jobId, taskId, timeOutInSeconds, ocpdate, select, context);
+            HttpMessage NextPageRequest(int?
pageSizeHint, string nextLink) => CreateGetSubTasksNextPageRequest(nextLink, jobId, taskId, timeOutInSeconds, ocpdate, select, context); + return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetSubTasks", "value", "odata.nextLink", context); + } + + /// + /// [Protocol Method] Lists all of the subtasks that are associated with the specified multi-instance + /// Task. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job. + /// The ID of the Task. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// An OData $select clause. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. + /// + public virtual Pageable GetSubTasks(string jobId, string taskId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, RequestContext context) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetSubTasksRequest(jobId, taskId, timeOutInSeconds, ocpdate, select, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetSubTasksNextPageRequest(nextLink, jobId, taskId, timeOutInSeconds, ocpdate, select, context); + return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetSubTasks", "value", "odata.nextLink", context); + } + + /// Lists the files in a Task's directory on its Compute Node. + /// The ID of the Job that contains the Task. + /// The ID of the Task whose files you want to list. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files. + /// + /// + /// Whether to list children of the Task directory. This parameter can be used in + /// combination with the filter parameter to list specific type of files. + /// + /// The cancellation token to use. + /// or is null. 
+ /// or is an empty string, and was expected to be non-empty. + /// + public virtual AsyncPageable GetTaskFilesAsync(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, bool? recursive = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + + RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetTaskFilesRequest(jobId, taskId, timeOutInSeconds, ocpdate, maxresults, filter, recursive, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetTaskFilesNextPageRequest(nextLink, jobId, taskId, timeOutInSeconds, ocpdate, maxresults, filter, recursive, context); + return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchNodeFile.DeserializeBatchNodeFile(e), ClientDiagnostics, _pipeline, "BatchClient.GetTaskFiles", "value", "odata.nextLink", context); + } + + /// Lists the files in a Task's directory on its Compute Node. + /// The ID of the Job that contains the Task. + /// The ID of the Task whose files you want to list. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files. + /// + /// + /// Whether to list children of the Task directory. This parameter can be used in + /// combination with the filter parameter to list specific type of files. + /// + /// The cancellation token to use. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// + public virtual Pageable GetTaskFiles(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, bool? recursive = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + + RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetTaskFilesRequest(jobId, taskId, timeOutInSeconds, ocpdate, maxresults, filter, recursive, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetTaskFilesNextPageRequest(nextLink, jobId, taskId, timeOutInSeconds, ocpdate, maxresults, filter, recursive, context); + return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BatchNodeFile.DeserializeBatchNodeFile(e), ClientDiagnostics, _pipeline, "BatchClient.GetTaskFiles", "value", "odata.nextLink", context); + } + + /// + /// [Protocol Method] Lists the files in a Task's directory on its Compute Node. 
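// A sketch of the file-listing convenience overloads above (assumes "client" is an
// authenticated BatchClient; the Name/IsDirectory property names follow the
// service schema):
//
//     await foreach (BatchNodeFile file in client.GetTaskFilesAsync(
//         "job-1", "task-1", recursive: true))
//     {
//         if (file.IsDirectory != true)
//         {
//             Console.WriteLine(file.Name);
//         }
//     }
//
// recursive: true walks subdirectories of the task directory; combine it with a
// $filter to restrict the file types returned, as the parameter docs note.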
+ /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job that contains the Task. + /// The ID of the Task whose files you want to list. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files. + /// + /// + /// Whether to list children of the Task directory. This parameter can be used in + /// combination with the filter parameter to list specific type of files. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. + /// + public virtual AsyncPageable GetTaskFilesAsync(string jobId, string taskId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, bool? recursive, RequestContext context) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetTaskFilesRequest(jobId, taskId, timeOutInSeconds, ocpdate, maxresults, filter, recursive, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetTaskFilesNextPageRequest(nextLink, jobId, taskId, timeOutInSeconds, ocpdate, maxresults, filter, recursive, context); + return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetTaskFiles", "value", "odata.nextLink", context); + } + + /// + /// [Protocol Method] Lists the files in a Task's directory on its Compute Node. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Job that contains the Task. + /// The ID of the Task whose files you want to list. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. 
For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-task-files. + /// + /// + /// Whether to list children of the Task directory. This parameter can be used in + /// combination with the filter parameter to list specific type of files. + /// + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. + /// + public virtual Pageable GetTaskFiles(string jobId, string taskId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, bool? recursive, RequestContext context) + { + Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); + Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); + + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetTaskFilesRequest(jobId, taskId, timeOutInSeconds, ocpdate, maxresults, filter, recursive, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetTaskFilesNextPageRequest(nextLink, jobId, taskId, timeOutInSeconds, ocpdate, maxresults, filter, recursive, context); + return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetTaskFiles", "value", "odata.nextLink", context); + } + + /// Lists the Compute Nodes in the specified Pool. + /// The ID of the Pool from which you want to list Compute Nodes. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. + /// + /// An OData $select clause. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// + public virtual AsyncPageable GetNodesAsync(string poolId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, IEnumerable select = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + + RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetNodesRequest(poolId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + HttpMessage NextPageRequest(int? 
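+ // --- Editor's usage sketch (illustrative only). The convenience node listing accepts the
+ // OData query options as named arguments; the $filter text below is an assumed example,
+ // see the linked odata-filters-in-batch page for the supported syntax:
+ //
+ //   await foreach (BatchNode node in client.GetNodesAsync("pool-1", filter: "state eq 'idle'", select: new[] { "id", "state" }))
+ //   {
+ //       Console.WriteLine($"{node.Id}: {node.State}");
+ //   }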
pageSizeHint, string nextLink) => CreateGetNodesNextPageRequest(nextLink, poolId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchNode.DeserializeBatchNode(e), ClientDiagnostics, _pipeline, "BatchClient.GetNodes", "value", "odata.nextLink", context); + } + + /// Lists the Compute Nodes in the specified Pool. + /// The ID of the Pool from which you want to list Compute Nodes. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. + /// + /// An OData $select clause. + /// The cancellation token to use. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// + public virtual Pageable GetNodes(string poolId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, IEnumerable select = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + + RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetNodesRequest(poolId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetNodesNextPageRequest(nextLink, poolId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BatchNode.DeserializeBatchNode(e), ClientDiagnostics, _pipeline, "BatchClient.GetNodes", "value", "odata.nextLink", context); + } + + /// + /// [Protocol Method] Lists the Compute Nodes in the specified Pool. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool from which you want to list Compute Nodes. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. + /// + /// An OData $select clause. 
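+ // --- Editor's note: the pageable helpers treat each response as a page whose items live in
+ // the "value" JSON array and whose continuation token is "odata.nextLink". Note that the
+ // generated FirstPageRequest/NextPageRequest local functions accept pageSizeHint but ignore
+ // it, forwarding the explicit maxresults parameter instead. Page-by-page iteration sketch:
+ //
+ //   await foreach (Page<BatchNode> page in client.GetNodesAsync("pool-1", maxresults: 100).AsPages())
+ //   {
+ //       Console.WriteLine($"fetched {page.Values.Count} nodes");
+ //   }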
+ /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. + /// + public virtual AsyncPageable GetNodesAsync(string poolId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, RequestContext context) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetNodesRequest(poolId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetNodesNextPageRequest(nextLink, poolId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetNodes", "value", "odata.nextLink", context); + } + + /// + /// [Protocol Method] Lists the Compute Nodes in the specified Pool. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool from which you want to list Compute Nodes. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-nodes-in-a-pool. + /// + /// An OData $select clause. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. + /// + public virtual Pageable GetNodes(string poolId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, RequestContext context) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetNodesRequest(poolId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + HttpMessage NextPageRequest(int? 
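+ // --- Editor's sketch for consuming the protocol overload (illustrative only). Each item is
+ // the raw JSON of one node, handed back as BinaryData; parsing uses System.Text.Json, and
+ // "id" is the node identifier property in the Batch REST schema:
+ //
+ //   await foreach (BinaryData item in client.GetNodesAsync("pool-1", null, null, null, null, null, new RequestContext()))
+ //   {
+ //       using JsonDocument doc = JsonDocument.Parse(item.ToMemory());
+ //       Console.WriteLine(doc.RootElement.GetProperty("id").GetString());
+ //   }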
pageSizeHint, string nextLink) => CreateGetNodesNextPageRequest(nextLink, poolId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetNodes", "value", "odata.nextLink", context); + } + + /// Lists the Compute Node extensions in the specified Pool. + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node whose extensions you want to list. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead. + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// items can be returned. + /// + /// An OData $select clause. + /// The cancellation token to use. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// + public virtual AsyncPageable GetNodeExtensionsAsync(string poolId, string nodeId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, IEnumerable select = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + + RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetNodeExtensionsRequest(poolId, nodeId, timeOutInSeconds, ocpdate, maxresults, select, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetNodeExtensionsNextPageRequest(nextLink, poolId, nodeId, timeOutInSeconds, ocpdate, maxresults, select, context); + return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchNodeVMExtension.DeserializeBatchNodeVMExtension(e), ClientDiagnostics, _pipeline, "BatchClient.GetNodeExtensions", "value", "odata.nextLink", context); + } + + /// Lists the Compute Node extensions in the specified Pool. + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node whose extensions you want to list. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead. + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// items can be returned. + /// + /// An OData $select clause. + /// The cancellation token to use. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// + public virtual Pageable GetNodeExtensions(string poolId, string nodeId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, IEnumerable select = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + + RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetNodeExtensionsRequest(poolId, nodeId, timeOutInSeconds, ocpdate, maxresults, select, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetNodeExtensionsNextPageRequest(nextLink, poolId, nodeId, timeOutInSeconds, ocpdate, maxresults, select, context); + return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BatchNodeVMExtension.DeserializeBatchNodeVMExtension(e), ClientDiagnostics, _pipeline, "BatchClient.GetNodeExtensions", "value", "odata.nextLink", context); + } + + /// + /// [Protocol Method] Lists the Compute Node extensions in the specified Pool. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios.
+ /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool that contains Compute Node. + /// The ID of the Compute Node that you want to list extensions. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// An OData $select clause. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. + /// + public virtual Pageable GetNodeExtensions(string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, IEnumerable select, RequestContext context) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetNodeExtensionsRequest(poolId, nodeId, timeOutInSeconds, ocpdate, maxresults, select, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetNodeExtensionsNextPageRequest(nextLink, poolId, nodeId, timeOutInSeconds, ocpdate, maxresults, select, context); + return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetNodeExtensions", "value", "odata.nextLink", context); + } + + /// Lists all of the files in Task directories on the specified Compute Node. + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node whose files you want to list. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. + /// + /// Whether to list children of a directory. + /// The cancellation token to use. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// + public virtual AsyncPageable GetNodeFilesAsync(string poolId, string nodeId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, bool? 
recursive = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + + RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetNodeFilesRequest(poolId, nodeId, timeOutInSeconds, ocpdate, maxresults, filter, recursive, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetNodeFilesNextPageRequest(nextLink, poolId, nodeId, timeOutInSeconds, ocpdate, maxresults, filter, recursive, context); + return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchNodeFile.DeserializeBatchNodeFile(e), ClientDiagnostics, _pipeline, "BatchClient.GetNodeFiles", "value", "odata.nextLink", context); + } + + /// Lists all of the files in Task directories on the specified Compute Node. + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node whose files you want to list. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. + /// + /// Whether to list children of a directory. + /// The cancellation token to use. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// + public virtual Pageable GetNodeFiles(string poolId, string nodeId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, bool? recursive = null, CancellationToken cancellationToken = default) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + + RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetNodeFilesRequest(poolId, nodeId, timeOutInSeconds, ocpdate, maxresults, filter, recursive, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetNodeFilesNextPageRequest(nextLink, poolId, nodeId, timeOutInSeconds, ocpdate, maxresults, filter, recursive, context); + return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BatchNodeFile.DeserializeBatchNodeFile(e), ClientDiagnostics, _pipeline, "BatchClient.GetNodeFiles", "value", "odata.nextLink", context); + } + + /// + /// [Protocol Method] Lists all of the files in Task directories on the specified Compute Node. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool that contains the Compute Node. 
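+ // --- Editor's usage sketch (illustrative only). Node-level file listing can combine a
+ // $filter with recursive = true to walk task directories; the filter literal below is an
+ // assumed example, not a documented constant:
+ //
+ //   await foreach (BatchNodeFile f in client.GetNodeFilesAsync("pool-1", "node-1", filter: "startswith(name, 'workitems')", recursive: true))
+ //   {
+ //       Console.WriteLine(f.Name);
+ //   }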
+ /// The ID of the Compute Node whose files you want to list. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. + /// + /// Whether to list children of a directory. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. + /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. + /// + public virtual AsyncPageable GetNodeFilesAsync(string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, bool? recursive, RequestContext context) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetNodeFilesRequest(poolId, nodeId, timeOutInSeconds, ocpdate, maxresults, filter, recursive, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetNodeFilesNextPageRequest(nextLink, poolId, nodeId, timeOutInSeconds, ocpdate, maxresults, filter, recursive, context); + return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetNodeFiles", "value", "odata.nextLink", context); + } + + /// + /// [Protocol Method] Lists all of the files in Task directories on the specified Compute Node. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// Please try the simpler convenience overload with strongly typed models first. + /// + /// + /// + /// + /// The ID of the Pool that contains the Compute Node. + /// The ID of the Compute Node whose files you want to list. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// + /// The maximum number of items to return in the response. A maximum of 1000 + /// applications can be returned. + /// + /// + /// An OData $filter clause. For more information on constructing this filter, see + /// https://docs.microsoft.com/en-us/rest/api/batchservice/odata-filters-in-batch#list-compute-node-files. + /// + /// Whether to list children of a directory. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// or is null. 
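+ // --- Editor's sketch of error handling (illustrative only). Both convenience and protocol
+ // overloads surface a non-success status as RequestFailedException when a page is fetched:
+ //
+ //   try
+ //   {
+ //       await foreach (BatchNodeFile f in client.GetNodeFilesAsync("pool-1", "node-1")) { }
+ //   }
+ //   catch (RequestFailedException ex)
+ //   {
+ //       Console.WriteLine($"{ex.Status} {ex.ErrorCode}");   // HTTP status plus Batch error code
+ //   }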
+ /// or is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. + /// + public virtual Pageable GetNodeFiles(string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, bool? recursive, RequestContext context) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); + + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetNodeFilesRequest(poolId, nodeId, timeOutInSeconds, ocpdate, maxresults, filter, recursive, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetNodeFilesNextPageRequest(nextLink, poolId, nodeId, timeOutInSeconds, ocpdate, maxresults, filter, recursive, context); + return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetNodeFiles", "value", "odata.nextLink", context); + } + + internal HttpMessage CreateGetApplicationsRequest(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/applications", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + if (maxresults != null) + { + uri.AppendQuery("maxresults", maxresults.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateGetApplicationRequest(string applicationId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/applications/", false); + uri.AppendPath(applicationId, true); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateGetPoolUsageMetricsRequest(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, DateTimeOffset? starttime, DateTimeOffset? 
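+ // --- Editor's note (illustrative): every Create*Request helper below follows the same
+ // pattern — pick the ResponseClassifier for the expected success code, build the URI with
+ // RawRequestUriBuilder, append optional query parameters only when supplied, then add the
+ // Batch headers (client-request-id, return-client-request-id, ocp-date). For example,
+ // CreateGetApplicationsRequest(30, null, 10, ctx) would produce roughly:
+ //
+ //   GET {endpoint}/applications?api-version={_apiVersion}&timeOut=30&maxresults=10
+ //   Accept: application/json
+ //   return-client-request-id: true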
endtime, string filter, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/poolusagemetrics", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + if (maxresults != null) + { + uri.AppendQuery("maxresults", maxresults.Value, true); + } + if (starttime != null) + { + uri.AppendQuery("startTime", starttime.Value, "O", true); + } + if (endtime != null) + { + uri.AppendQuery("endtime", endtime.Value, "O", true); + } + if (filter != null) + { + uri.AppendQuery("$filter", filter, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateCreatePoolRequest(RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier201); + var request = message.Request; + request.Method = RequestMethod.Post; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/pools", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + request.Headers.Add("content-type", "application/json; odata=minimalmetadata"); + request.Content = content; + return message; + } + + internal HttpMessage CreateGetPoolsRequest(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? 
maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/pools", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + if (maxresults != null) + { + uri.AppendQuery("maxresults", maxresults.Value, true); + } + if (filter != null) + { + uri.AppendQuery("$filter", filter, true); + } + if (select != null && !(select is ChangeTrackingList changeTrackingList && changeTrackingList.IsUndefined)) + { + uri.AppendQueryDelimited("$select", select, ",", true); + } + if (expand != null && !(expand is ChangeTrackingList changeTrackingList0 && changeTrackingList0.IsUndefined)) + { + uri.AppendQueryDelimited("$expand", expand, ",", true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateDeletePoolRequest(string poolId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier202); + var request = message.Request; + request.Method = RequestMethod.Delete; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/pools/", false); + uri.AppendPath(poolId, true); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + if (requestConditions != null) + { + request.Headers.Add(requestConditions, "R"); + } + return message; + } + + internal HttpMessage CreatePoolExistsRequest(string poolId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200404); + var request = message.Request; + request.Method = RequestMethod.Head; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/pools/", false); + uri.AppendPath(poolId, true); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + if (requestConditions != null) + { + request.Headers.Add(requestConditions, "R"); + } + return message; + } + + internal HttpMessage CreateGetPoolRequest(string poolId, int? timeOutInSeconds, DateTimeOffset? 
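+ // --- Editor's sketch of optimistic concurrency (illustrative only). The requestConditions
+ // parameter serializes the standard conditional headers (If-Match, If-None-Match,
+ // If-Modified-Since, If-Unmodified-Since), so a delete can be guarded on the ETag captured
+ // from an earlier read; the ETag value below is a placeholder:
+ //
+ //   RequestConditions conditions = new RequestConditions { IfMatch = new ETag("\"<etag-from-get>\"") };
+ //   // pass `conditions` to the DeletePool overload that feeds CreateDeletePoolRequest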
ocpdate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/pools/", false); + uri.AppendPath(poolId, true); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + if (select != null && !(select is ChangeTrackingList changeTrackingList && changeTrackingList.IsUndefined)) + { + uri.AppendQueryDelimited("$select", select, ",", true); + } + if (expand != null && !(expand is ChangeTrackingList changeTrackingList0 && changeTrackingList0.IsUndefined)) + { + uri.AppendQueryDelimited("$expand", expand, ",", true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + if (requestConditions != null) + { + request.Headers.Add(requestConditions, "R"); + } + return message; + } + + internal HttpMessage CreateUpdatePoolRequest(string poolId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Patch; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/pools/", false); + uri.AppendPath(poolId, true); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + if (requestConditions != null) + { + request.Headers.Add(requestConditions, "R"); + } + request.Headers.Add("content-type", "application/json; odata=minimalmetadata"); + request.Content = content; + return message; + } + + internal HttpMessage CreateDisablePoolAutoScaleRequest(string poolId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Post; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/pools/", false); + uri.AppendPath(poolId, true); + uri.AppendPath("/disableautoscale", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateEnablePoolAutoScaleRequest(string poolId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? 
ocpdate, RequestConditions requestConditions, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Post; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/pools/", false); + uri.AppendPath(poolId, true); + uri.AppendPath("/enableautoscale", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + if (requestConditions != null) + { + request.Headers.Add(requestConditions, "R"); + } + request.Headers.Add("content-type", "application/json; odata=minimalmetadata"); + request.Content = content; + return message; + } + + internal HttpMessage CreateEvaluatePoolAutoScaleRequest(string poolId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Post; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/pools/", false); + uri.AppendPath(poolId, true); + uri.AppendPath("/evaluateautoscale", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + request.Headers.Add("content-type", "application/json; odata=minimalmetadata"); + request.Content = content; + return message; + } + + internal HttpMessage CreateResizePoolRequest(string poolId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier202); + var request = message.Request; + request.Method = RequestMethod.Post; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/pools/", false); + uri.AppendPath(poolId, true); + uri.AppendPath("/resize", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + if (requestConditions != null) + { + request.Headers.Add(requestConditions, "R"); + } + request.Headers.Add("content-type", "application/json; odata=minimalmetadata"); + request.Content = content; + return message; + } + + internal HttpMessage CreateStopPoolResizeRequest(string poolId, int? timeOutInSeconds, DateTimeOffset? 
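+ // --- Editor's sketch of a protocol-level resize body (illustrative; property names assumed
+ // from the Batch REST schema, camelCase JSON, ISO 8601 duration for the timeout):
+ //
+ //   RequestContent resizeBody = RequestContent.Create(BinaryData.FromObjectAsJson(new
+ //   {
+ //       targetDedicatedNodes = 4,
+ //       resizeTimeout = "PT15M"
+ //   }));
+ //   // POSTed to /pools/{poolId}/resize with content-type "application/json; odata=minimalmetadata"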
ocpdate, RequestConditions requestConditions, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier202); + var request = message.Request; + request.Method = RequestMethod.Post; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/pools/", false); + uri.AppendPath(poolId, true); + uri.AppendPath("/stopresize", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + if (requestConditions != null) + { + request.Headers.Add(requestConditions, "R"); + } + return message; + } + + internal HttpMessage CreateReplacePoolPropertiesRequest(string poolId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier204); + var request = message.Request; + request.Method = RequestMethod.Post; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/pools/", false); + uri.AppendPath(poolId, true); + uri.AppendPath("/updateproperties", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + request.Headers.Add("content-type", "application/json; odata=minimalmetadata"); + request.Content = content; + return message; + } + + internal HttpMessage CreateRemoveNodesRequest(string poolId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier202); + var request = message.Request; + request.Method = RequestMethod.Post; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/pools/", false); + uri.AppendPath(poolId, true); + uri.AppendPath("/removenodes", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + if (requestConditions != null) + { + request.Headers.Add(requestConditions, "R"); + } + request.Headers.Add("content-type", "application/json; odata=minimalmetadata"); + request.Content = content; + return message; + } + + internal HttpMessage CreateGetSupportedImagesRequest(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? 
maxresults, string filter, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/supportedimages", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + if (maxresults != null) + { + uri.AppendQuery("maxresults", maxresults.Value, true); + } + if (filter != null) + { + uri.AppendQuery("$filter", filter, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateGetPoolNodeCountsRequest(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/nodecounts", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + if (maxresults != null) + { + uri.AppendQuery("maxresults", maxresults.Value, true); + } + if (filter != null) + { + uri.AppendQuery("$filter", filter, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateDeleteJobRequest(string jobId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier202); + var request = message.Request; + request.Method = RequestMethod.Delete; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobs/", false); + uri.AppendPath(jobId, true); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + if (requestConditions != null) + { + request.Headers.Add(requestConditions, "R"); + } + return message; + } + + internal HttpMessage CreateGetJobRequest(string jobId, int? timeOutInSeconds, DateTimeOffset? 
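+ // --- Editor's note (illustrative): the classifier chosen per builder encodes the REST
+ // contract — 200 for reads, 201 for creates, 202 for accepted asynchronous operations such
+ // as pool/job deletes and resizes, 204 for property replacement, and 200/404 for the HEAD
+ // existence probe, where 404 is a valid non-error answer. Sketch, assuming the protocol
+ // PoolExists overload:
+ //
+ //   Response probe = client.PoolExists("pool-1", null, null, null, new RequestContext());
+ //   bool exists = probe.Status == 200;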
ocpdate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobs/", false); + uri.AppendPath(jobId, true); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + if (select != null && !(select is ChangeTrackingList changeTrackingList && changeTrackingList.IsUndefined)) + { + uri.AppendQueryDelimited("$select", select, ",", true); + } + if (expand != null && !(expand is ChangeTrackingList changeTrackingList0 && changeTrackingList0.IsUndefined)) + { + uri.AppendQueryDelimited("$expand", expand, ",", true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + if (requestConditions != null) + { + request.Headers.Add(requestConditions, "R"); + } + return message; + } + + internal HttpMessage CreateUpdateJobRequest(string jobId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Patch; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobs/", false); + uri.AppendPath(jobId, true); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + if (requestConditions != null) + { + request.Headers.Add(requestConditions, "R"); + } + request.Headers.Add("content-type", "application/json; odata=minimalmetadata"); + request.Content = content; + return message; + } + + internal HttpMessage CreateReplaceJobRequest(string jobId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? 
ocpdate, RequestConditions requestConditions, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Put; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobs/", false); + uri.AppendPath(jobId, true); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + if (requestConditions != null) + { + request.Headers.Add(requestConditions, "R"); + } + request.Headers.Add("content-type", "application/json; odata=minimalmetadata"); + request.Content = content; + return message; + } + + internal HttpMessage CreateDisableJobRequest(string jobId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier202); + var request = message.Request; + request.Method = RequestMethod.Post; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobs/", false); + uri.AppendPath(jobId, true); + uri.AppendPath("/disable", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + if (requestConditions != null) + { + request.Headers.Add(requestConditions, "R"); + } + request.Headers.Add("content-type", "application/json; odata=minimalmetadata"); + request.Content = content; + return message; + } + + internal HttpMessage CreateEnableJobRequest(string jobId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier202); + var request = message.Request; + request.Method = RequestMethod.Post; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobs/", false); + uri.AppendPath(jobId, true); + uri.AppendPath("/enable", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + if (requestConditions != null) + { + request.Headers.Add(requestConditions, "R"); + } + return message; + } + + internal HttpMessage CreateTerminateJobRequest(string jobId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? 
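+ // --- Editor's sketch of the disable-job body (illustrative; disableTasks takes one of the
+ // REST-documented options, e.g. "requeue", "terminate", or "wait"):
+ //
+ //   RequestContent disableBody = RequestContent.Create(BinaryData.FromObjectAsJson(new { disableTasks = "requeue" }));
+ //   // POSTed to /jobs/{jobId}/disable; a later POST to /jobs/{jobId}/enable resumes scheduling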
ocpdate, RequestConditions requestConditions, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier202); + var request = message.Request; + request.Method = RequestMethod.Post; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobs/", false); + uri.AppendPath(jobId, true); + uri.AppendPath("/terminate", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + if (requestConditions != null) + { + request.Headers.Add(requestConditions, "R"); + } + request.Headers.Add("content-type", "application/json; odata=minimalmetadata"); + request.Content = content; + return message; + } + + internal HttpMessage CreateCreateJobRequest(RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier201); + var request = message.Request; + request.Method = RequestMethod.Post; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobs", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + request.Headers.Add("content-type", "application/json; odata=minimalmetadata"); + request.Content = content; + return message; + } + + internal HttpMessage CreateGetJobsRequest(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable<string> select, IEnumerable<string> expand, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobs", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + if (maxresults != null) + { + uri.AppendQuery("maxresults", maxresults.Value, true); + } + if (filter != null) + { + uri.AppendQuery("$filter", filter, true); + } + if (select != null && !(select is ChangeTrackingList<string> changeTrackingList && changeTrackingList.IsUndefined)) + { + uri.AppendQueryDelimited("$select", select, ",", true); + } + if (expand != null && !(expand is ChangeTrackingList<string> changeTrackingList0 && changeTrackingList0.IsUndefined)) + { + uri.AppendQueryDelimited("$expand", expand, ",", true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateGetJobsFromSchedulesRequest(string jobScheduleId, int?
timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable<string> select, IEnumerable<string> expand, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobschedules/", false); + uri.AppendPath(jobScheduleId, true); + uri.AppendPath("/jobs", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + if (maxresults != null) + { + uri.AppendQuery("maxresults", maxresults.Value, true); + } + if (filter != null) + { + uri.AppendQuery("$filter", filter, true); + } + if (select != null && !(select is ChangeTrackingList<string> changeTrackingList && changeTrackingList.IsUndefined)) + { + uri.AppendQueryDelimited("$select", select, ",", true); + } + if (expand != null && !(expand is ChangeTrackingList<string> changeTrackingList0 && changeTrackingList0.IsUndefined)) + { + uri.AppendQueryDelimited("$expand", expand, ",", true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateGetJobPreparationAndReleaseTaskStatusesRequest(string jobId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable<string> select, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobs/", false); + uri.AppendPath(jobId, true); + uri.AppendPath("/jobpreparationandreleasetaskstatus", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + if (maxresults != null) + { + uri.AppendQuery("maxresults", maxresults.Value, true); + } + if (filter != null) + { + uri.AppendQuery("$filter", filter, true); + } + if (select != null && !(select is ChangeTrackingList<string> changeTrackingList && changeTrackingList.IsUndefined)) + { + uri.AppendQueryDelimited("$select", select, ",", true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateGetJobTaskCountsRequest(string jobId, int? timeOutInSeconds, DateTimeOffset?
ocpdate, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobs/", false); + uri.AppendPath(jobId, true); + uri.AppendPath("/taskcounts", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateJobScheduleExistsRequest(string jobScheduleId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200404); + var request = message.Request; + request.Method = RequestMethod.Head; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobschedules/", false); + uri.AppendPath(jobScheduleId, true); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + if (requestConditions != null) + { + request.Headers.Add(requestConditions, "R"); + } + return message; + } + + internal HttpMessage CreateDeleteJobScheduleRequest(string jobScheduleId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier202); + var request = message.Request; + request.Method = RequestMethod.Delete; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobschedules/", false); + uri.AppendPath(jobScheduleId, true); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + if (requestConditions != null) + { + request.Headers.Add(requestConditions, "R"); + } + return message; + } + + internal HttpMessage CreateGetJobScheduleRequest(string jobScheduleId, int? timeOutInSeconds, DateTimeOffset? 
ocpdate, IEnumerable<string> select, IEnumerable<string> expand, RequestConditions requestConditions, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobschedules/", false); + uri.AppendPath(jobScheduleId, true); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + if (select != null && !(select is ChangeTrackingList<string> changeTrackingList && changeTrackingList.IsUndefined)) + { + uri.AppendQueryDelimited("$select", select, ",", true); + } + if (expand != null && !(expand is ChangeTrackingList<string> changeTrackingList0 && changeTrackingList0.IsUndefined)) + { + uri.AppendQueryDelimited("$expand", expand, ",", true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + if (requestConditions != null) + { + request.Headers.Add(requestConditions, "R"); + } + return message; + } + + internal HttpMessage CreateUpdateJobScheduleRequest(string jobScheduleId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Patch; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobschedules/", false); + uri.AppendPath(jobScheduleId, true); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + if (requestConditions != null) + { + request.Headers.Add(requestConditions, "R"); + } + request.Headers.Add("content-type", "application/json; odata=minimalmetadata"); + request.Content = content; + return message; + } + + internal HttpMessage CreateReplaceJobScheduleRequest(string jobScheduleId, RequestContent content, int? timeOutInSeconds, DateTimeOffset?
ocpdate, RequestConditions requestConditions, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Put; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobschedules/", false); + uri.AppendPath(jobScheduleId, true); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + if (requestConditions != null) + { + request.Headers.Add(requestConditions, "R"); + } + request.Headers.Add("content-type", "application/json; odata=minimalmetadata"); + request.Content = content; + return message; + } + + internal HttpMessage CreateDisableJobScheduleRequest(string jobScheduleId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier204); + var request = message.Request; + request.Method = RequestMethod.Post; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobschedules/", false); + uri.AppendPath(jobScheduleId, true); + uri.AppendPath("/disable", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + if (requestConditions != null) + { + request.Headers.Add(requestConditions, "R"); + } + return message; + } + + internal HttpMessage CreateEnableJobScheduleRequest(string jobScheduleId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier204); + var request = message.Request; + request.Method = RequestMethod.Post; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobschedules/", false); + uri.AppendPath(jobScheduleId, true); + uri.AppendPath("/enable", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + if (requestConditions != null) + { + request.Headers.Add(requestConditions, "R"); + } + return message; + } + + internal HttpMessage CreateTerminateJobScheduleRequest(string jobScheduleId, int? timeOutInSeconds, DateTimeOffset? 
ocpdate, RequestConditions requestConditions, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier202); + var request = message.Request; + request.Method = RequestMethod.Post; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobschedules/", false); + uri.AppendPath(jobScheduleId, true); + uri.AppendPath("/terminate", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + if (requestConditions != null) + { + request.Headers.Add(requestConditions, "R"); + } + return message; + } + + internal HttpMessage CreateCreateJobScheduleRequest(RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier201); + var request = message.Request; + request.Method = RequestMethod.Post; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobschedules", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + request.Headers.Add("content-type", "application/json; odata=minimalmetadata"); + request.Content = content; + return message; + } + + internal HttpMessage CreateGetJobSchedulesRequest(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable<string> select, IEnumerable<string> expand, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobschedules", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + if (maxresults != null) + { + uri.AppendQuery("maxresults", maxresults.Value, true); + } + if (filter != null) + { + uri.AppendQuery("$filter", filter, true); + } + if (select != null && !(select is ChangeTrackingList<string> changeTrackingList && changeTrackingList.IsUndefined)) + { + uri.AppendQueryDelimited("$select", select, ",", true); + } + if (expand != null && !(expand is ChangeTrackingList<string> changeTrackingList0 && changeTrackingList0.IsUndefined)) + { + uri.AppendQueryDelimited("$expand", expand, ",", true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateCreateTaskRequest(string jobId, RequestContent content, int? timeOutInSeconds, DateTimeOffset?
ocpdate, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier201); + var request = message.Request; + request.Method = RequestMethod.Post; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobs/", false); + uri.AppendPath(jobId, true); + uri.AppendPath("/tasks", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + request.Headers.Add("content-type", "application/json; odata=minimalmetadata"); + request.Content = content; + return message; + } + + internal HttpMessage CreateGetTasksRequest(string jobId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable<string> select, IEnumerable<string> expand, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobs/", false); + uri.AppendPath(jobId, true); + uri.AppendPath("/tasks", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + if (maxresults != null) + { + uri.AppendQuery("maxresults", maxresults.Value, true); + } + if (filter != null) + { + uri.AppendQuery("$filter", filter, true); + } + if (select != null && !(select is ChangeTrackingList<string> changeTrackingList && changeTrackingList.IsUndefined)) + { + uri.AppendQueryDelimited("$select", select, ",", true); + } + if (expand != null && !(expand is ChangeTrackingList<string> changeTrackingList0 && changeTrackingList0.IsUndefined)) + { + uri.AppendQueryDelimited("$expand", expand, ",", true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateCreateTaskCollectionRequest(string jobId, RequestContent content, int? timeOutInSeconds, DateTimeOffset?
ocpdate, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Post; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobs/", false); + uri.AppendPath(jobId, true); + uri.AppendPath("/addtaskcollection", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + request.Headers.Add("content-type", "application/json; odata=minimalmetadata"); + request.Content = content; + return message; + } + + internal HttpMessage CreateDeleteTaskRequest(string jobId, string taskId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Delete; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobs/", false); + uri.AppendPath(jobId, true); + uri.AppendPath("/tasks/", false); + uri.AppendPath(taskId, true); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + if (requestConditions != null) + { + request.Headers.Add(requestConditions, "R"); + } + return message; + } + + internal HttpMessage CreateGetTaskRequest(string jobId, string taskId, int? timeOutInSeconds, DateTimeOffset? 
ocpdate, IEnumerable<string> select, IEnumerable<string> expand, RequestConditions requestConditions, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobs/", false); + uri.AppendPath(jobId, true); + uri.AppendPath("/tasks/", false); + uri.AppendPath(taskId, true); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + if (select != null && !(select is ChangeTrackingList<string> changeTrackingList && changeTrackingList.IsUndefined)) + { + uri.AppendQueryDelimited("$select", select, ",", true); + } + if (expand != null && !(expand is ChangeTrackingList<string> changeTrackingList0 && changeTrackingList0.IsUndefined)) + { + uri.AppendQueryDelimited("$expand", expand, ",", true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + if (requestConditions != null) + { + request.Headers.Add(requestConditions, "R"); + } + return message; + } + + internal HttpMessage CreateReplaceTaskRequest(string jobId, string taskId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Put; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobs/", false); + uri.AppendPath(jobId, true); + uri.AppendPath("/tasks/", false); + uri.AppendPath(taskId, true); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + if (requestConditions != null) + { + request.Headers.Add(requestConditions, "R"); + } + request.Headers.Add("content-type", "application/json; odata=minimalmetadata"); + request.Content = content; + return message; + } + + internal HttpMessage CreateGetSubTasksRequest(string jobId, string taskId, int? timeOutInSeconds, DateTimeOffset?
ocpdate, IEnumerable<string> select, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobs/", false); + uri.AppendPath(jobId, true); + uri.AppendPath("/tasks/", false); + uri.AppendPath(taskId, true); + uri.AppendPath("/subtasksinfo", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + if (select != null && !(select is ChangeTrackingList<string> changeTrackingList && changeTrackingList.IsUndefined)) + { + uri.AppendQueryDelimited("$select", select, ",", true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateTerminateTaskRequest(string jobId, string taskId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier204); + var request = message.Request; + request.Method = RequestMethod.Post; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobs/", false); + uri.AppendPath(jobId, true); + uri.AppendPath("/tasks/", false); + uri.AppendPath(taskId, true); + uri.AppendPath("/terminate", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + if (requestConditions != null) + { + request.Headers.Add(requestConditions, "R"); + } + return message; + } + + internal HttpMessage CreateReactivateTaskRequest(string jobId, string taskId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier204); + var request = message.Request; + request.Method = RequestMethod.Post; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobs/", false); + uri.AppendPath(jobId, true); + uri.AppendPath("/tasks/", false); + uri.AppendPath(taskId, true); + uri.AppendPath("/reactivate", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + if (requestConditions != null) + { + request.Headers.Add(requestConditions, "R"); + } + return message; + } + + internal HttpMessage CreateDeleteTaskFileRequest(string jobId, string taskId, string filePath, int? timeOutInSeconds, DateTimeOffset? ocpdate, bool?
recursive, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Delete; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobs/", false); + uri.AppendPath(jobId, true); + uri.AppendPath("/tasks/", false); + uri.AppendPath(taskId, true); + uri.AppendPath("/files/", false); + uri.AppendPath(filePath, true); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + if (recursive != null) + { + uri.AppendQuery("recursive", recursive.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateGetTaskFileRequest(string jobId, string taskId, string filePath, int? timeOutInSeconds, DateTimeOffset? ocpdate, string ocpRange, RequestConditions requestConditions, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobs/", false); + uri.AppendPath(jobId, true); + uri.AppendPath("/tasks/", false); + uri.AppendPath(taskId, true); + uri.AppendPath("/files/", false); + uri.AppendPath(filePath, true); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/octet-stream"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + if (ocpRange != null) + { + request.Headers.Add("ocp-range", ocpRange); + } + if (requestConditions != null) + { + request.Headers.Add(requestConditions, "R"); + } + return message; + } + + internal HttpMessage CreateGetTaskFilePropertiesInternalRequest(string jobId, string taskId, string filePath, int? timeOutInSeconds, DateTimeOffset? 
ocpdate, RequestConditions requestConditions, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Head; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobs/", false); + uri.AppendPath(jobId, true); + uri.AppendPath("/tasks/", false); + uri.AppendPath(taskId, true); + uri.AppendPath("/files/", false); + uri.AppendPath(filePath, true); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + if (requestConditions != null) + { + request.Headers.Add(requestConditions, "R"); + } + return message; + } + + internal HttpMessage CreateGetTaskFilesRequest(string jobId, string taskId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, bool? recursive, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/jobs/", false); + uri.AppendPath(jobId, true); + uri.AppendPath("/tasks/", false); + uri.AppendPath(taskId, true); + uri.AppendPath("/files", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + if (maxresults != null) + { + uri.AppendQuery("maxresults", maxresults.Value, true); + } + if (filter != null) + { + uri.AppendQuery("$filter", filter, true); + } + if (recursive != null) + { + uri.AppendQuery("recursive", recursive.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateCreateNodeUserRequest(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? 
ocpdate, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier201); + var request = message.Request; + request.Method = RequestMethod.Post; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/pools/", false); + uri.AppendPath(poolId, true); + uri.AppendPath("/nodes/", false); + uri.AppendPath(nodeId, true); + uri.AppendPath("/users", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + request.Headers.Add("content-type", "application/json; odata=minimalmetadata"); + request.Content = content; + return message; + } + + internal HttpMessage CreateDeleteNodeUserRequest(string poolId, string nodeId, string userName, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Delete; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/pools/", false); + uri.AppendPath(poolId, true); + uri.AppendPath("/nodes/", false); + uri.AppendPath(nodeId, true); + uri.AppendPath("/users/", false); + uri.AppendPath(userName, true); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateReplaceNodeUserRequest(string poolId, string nodeId, string userName, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Put; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/pools/", false); + uri.AppendPath(poolId, true); + uri.AppendPath("/nodes/", false); + uri.AppendPath(nodeId, true); + uri.AppendPath("/users/", false); + uri.AppendPath(userName, true); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + request.Headers.Add("content-type", "application/json; odata=minimalmetadata"); + request.Content = content; + return message; + } + + internal HttpMessage CreateGetNodeRequest(string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset? 
ocpdate, IEnumerable<string> select, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/pools/", false); + uri.AppendPath(poolId, true); + uri.AppendPath("/nodes/", false); + uri.AppendPath(nodeId, true); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + if (select != null && !(select is ChangeTrackingList<string> changeTrackingList && changeTrackingList.IsUndefined)) + { + uri.AppendQueryDelimited("$select", select, ",", true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateRebootNodeRequest(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier202); + var request = message.Request; + request.Method = RequestMethod.Post; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/pools/", false); + uri.AppendPath(poolId, true); + uri.AppendPath("/nodes/", false); + uri.AppendPath(nodeId, true); + uri.AppendPath("/reboot", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + request.Headers.Add("content-type", "application/json; odata=minimalmetadata"); + request.Content = content; + return message; + } + + internal HttpMessage CreateDisableNodeSchedulingRequest(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Post; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/pools/", false); + uri.AppendPath(poolId, true); + uri.AppendPath("/nodes/", false); + uri.AppendPath(nodeId, true); + uri.AppendPath("/disablescheduling", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + request.Headers.Add("content-type", "application/json; odata=minimalmetadata"); + request.Content = content; + return message; + } + + internal HttpMessage CreateEnableNodeSchedulingRequest(string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset?
ocpdate, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Post; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/pools/", false); + uri.AppendPath(poolId, true); + uri.AppendPath("/nodes/", false); + uri.AppendPath(nodeId, true); + uri.AppendPath("/enablescheduling", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateGetNodeRemoteLoginSettingsRequest(string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/pools/", false); + uri.AppendPath(poolId, true); + uri.AppendPath("/nodes/", false); + uri.AppendPath(nodeId, true); + uri.AppendPath("/remoteloginsettings", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateUploadNodeLogsRequest(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Post; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/pools/", false); + uri.AppendPath(poolId, true); + uri.AppendPath("/nodes/", false); + uri.AppendPath(nodeId, true); + uri.AppendPath("/uploadbatchservicelogs", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + request.Headers.Add("content-type", "application/json; odata=minimalmetadata"); + request.Content = content; + return message; + } + + internal HttpMessage CreateGetNodesRequest(string poolId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? 
maxresults, string filter, IEnumerable<string> select, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/pools/", false); + uri.AppendPath(poolId, true); + uri.AppendPath("/nodes", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + if (maxresults != null) + { + uri.AppendQuery("maxresults", maxresults.Value, true); + } + if (filter != null) + { + uri.AppendQuery("$filter", filter, true); + } + if (select != null && !(select is ChangeTrackingList<string> changeTrackingList && changeTrackingList.IsUndefined)) + { + uri.AppendQueryDelimited("$select", select, ",", true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateGetNodeExtensionRequest(string poolId, string nodeId, string extensionName, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable<string> select, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/pools/", false); + uri.AppendPath(poolId, true); + uri.AppendPath("/nodes/", false); + uri.AppendPath(nodeId, true); + uri.AppendPath("/extensions/", false); + uri.AppendPath(extensionName, true); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + if (select != null && !(select is ChangeTrackingList<string> changeTrackingList && changeTrackingList.IsUndefined)) + { + uri.AppendQueryDelimited("$select", select, ",", true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateGetNodeExtensionsRequest(string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int?
maxresults, IEnumerable<string> select, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/pools/", false); + uri.AppendPath(poolId, true); + uri.AppendPath("/nodes/", false); + uri.AppendPath(nodeId, true); + uri.AppendPath("/extensions", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + if (maxresults != null) + { + uri.AppendQuery("maxresults", maxresults.Value, true); + } + if (select != null && !(select is ChangeTrackingList<string> changeTrackingList && changeTrackingList.IsUndefined)) + { + uri.AppendQueryDelimited("$select", select, ",", true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateDeleteNodeFileRequest(string poolId, string nodeId, string filePath, int? timeOutInSeconds, DateTimeOffset? ocpdate, bool? recursive, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Delete; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/pools/", false); + uri.AppendPath(poolId, true); + uri.AppendPath("/nodes/", false); + uri.AppendPath(nodeId, true); + uri.AppendPath("/files/", false); + uri.AppendPath(filePath, true); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + if (recursive != null) + { + uri.AppendQuery("recursive", recursive.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateGetNodeFileRequest(string poolId, string nodeId, string filePath, int? timeOutInSeconds, DateTimeOffset?
ocpdate, string ocpRange, RequestConditions requestConditions, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/pools/", false); + uri.AppendPath(poolId, true); + uri.AppendPath("/nodes/", false); + uri.AppendPath(nodeId, true); + uri.AppendPath("/files/", false); + uri.AppendPath(filePath, true); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/octet-stream"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + if (ocpRange != null) + { + request.Headers.Add("ocp-range", ocpRange); + } + if (requestConditions != null) + { + request.Headers.Add(requestConditions, "R"); + } + return message; + } + + internal HttpMessage CreateGetNodeFilePropertiesInternalRequest(string poolId, string nodeId, string filePath, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Head; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/pools/", false); + uri.AppendPath(poolId, true); + uri.AppendPath("/nodes/", false); + uri.AppendPath(nodeId, true); + uri.AppendPath("/files/", false); + uri.AppendPath(filePath, true); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + if (requestConditions != null) + { + request.Headers.Add(requestConditions, "R"); + } + return message; + } + + internal HttpMessage CreateGetNodeFilesRequest(string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, bool? 
recursive, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendPath("/pools/", false); + uri.AppendPath(poolId, true); + uri.AppendPath("/nodes/", false); + uri.AppendPath(nodeId, true); + uri.AppendPath("/files", false); + uri.AppendQuery("api-version", _apiVersion, true); + if (timeOutInSeconds != null) + { + uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + } + if (maxresults != null) + { + uri.AppendQuery("maxresults", maxresults.Value, true); + } + if (filter != null) + { + uri.AppendQuery("$filter", filter, true); + } + if (recursive != null) + { + uri.AppendQuery("recursive", recursive.Value, true); + } + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateGetApplicationsNextPageRequest(string nextLink, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendRawNextLink(nextLink, false); + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateGetPoolUsageMetricsNextPageRequest(string nextLink, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, DateTimeOffset? starttime, DateTimeOffset? endtime, string filter, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendRawNextLink(nextLink, false); + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateGetPoolsNextPageRequest(string nextLink, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? 
maxresults, string filter, IEnumerable<string> select, IEnumerable<string> expand, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendRawNextLink(nextLink, false); + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateGetSupportedImagesNextPageRequest(string nextLink, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendRawNextLink(nextLink, false); + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateGetPoolNodeCountsNextPageRequest(string nextLink, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendRawNextLink(nextLink, false); + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateGetJobsNextPageRequest(string nextLink, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable<string> select, IEnumerable<string> expand, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendRawNextLink(nextLink, false); + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateGetJobsFromSchedulesNextPageRequest(string nextLink, string jobScheduleId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int?
maxresults, string filter, IEnumerable<string> select, IEnumerable<string> expand, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendRawNextLink(nextLink, false); + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateGetJobPreparationAndReleaseTaskStatusesNextPageRequest(string nextLink, string jobId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable<string> select, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendRawNextLink(nextLink, false); + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateGetJobSchedulesNextPageRequest(string nextLink, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable<string> select, IEnumerable<string> expand, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendRawNextLink(nextLink, false); + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateGetTasksNextPageRequest(string nextLink, string jobId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable<string> select, IEnumerable<string> expand, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendRawNextLink(nextLink, false); + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateGetSubTasksNextPageRequest(string nextLink, string jobId, string taskId, int? timeOutInSeconds, DateTimeOffset?
ocpdate, IEnumerable<string> select, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendRawNextLink(nextLink, false); + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateGetTaskFilesNextPageRequest(string nextLink, string jobId, string taskId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, bool? recursive, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendRawNextLink(nextLink, false); + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateGetNodesNextPageRequest(string nextLink, string poolId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable<string> select, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendRawNextLink(nextLink, false); + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateGetNodeExtensionsNextPageRequest(string nextLink, string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, IEnumerable<string> select, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendRawNextLink(nextLink, false); + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + internal HttpMessage CreateGetNodeFilesNextPageRequest(string nextLink, string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, bool?
recursive, RequestContext context) + { + var message = _pipeline.CreateMessage(context, ResponseClassifier200); + var request = message.Request; + request.Method = RequestMethod.Get; + var uri = new RawRequestUriBuilder(); + uri.Reset(_endpoint); + uri.AppendRawNextLink(nextLink, false); + request.Uri = uri; + request.Headers.Add("Accept", "application/json"); + request.Headers.Add("client-request-id", message.Request.ClientRequestId); + request.Headers.Add("return-client-request-id", "true"); + if (ocpdate != null) + { + request.Headers.Add("ocp-date", ocpdate.Value, "R"); + } + return message; + } + + private static RequestContext DefaultRequestContext = new RequestContext(); + internal static RequestContext FromCancellationToken(CancellationToken cancellationToken = default) + { + if (!cancellationToken.CanBeCanceled) + { + return DefaultRequestContext; + } + + return new RequestContext() { CancellationToken = cancellationToken }; + } + + private static ResponseClassifier _responseClassifier200; + private static ResponseClassifier ResponseClassifier200 => _responseClassifier200 ??= new StatusCodeClassifier(stackalloc ushort[] { 200 }); + private static ResponseClassifier _responseClassifier201; + private static ResponseClassifier ResponseClassifier201 => _responseClassifier201 ??= new StatusCodeClassifier(stackalloc ushort[] { 201 }); + private static ResponseClassifier _responseClassifier202; + private static ResponseClassifier ResponseClassifier202 => _responseClassifier202 ??= new StatusCodeClassifier(stackalloc ushort[] { 202 }); + private static ResponseClassifier _responseClassifier200404; + private static ResponseClassifier ResponseClassifier200404 => _responseClassifier200404 ??= new StatusCodeClassifier(stackalloc ushort[] { 200, 404 }); + private static ResponseClassifier _responseClassifier204; + private static ResponseClassifier ResponseClassifier204 => _responseClassifier204 ??= new StatusCodeClassifier(stackalloc ushort[] { 204 }); + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchClientOptions.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchClientOptions.cs new file mode 100644 index 0000000000000..eab2e5942414c --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchClientOptions.cs @@ -0,0 +1,37 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + /// Client options for BatchClient. + public partial class BatchClientOptions : ClientOptions + { + private const ServiceVersion LatestVersion = ServiceVersion.V2024_02_01_19_0; + + /// The version of the service to use. + public enum ServiceVersion + { + /// Service version "2024-02-01.19.0". + V2024_02_01_19_0 = 1, + } + + internal string Version { get; } + + /// Initializes new instance of BatchClientOptions. + public BatchClientOptions(ServiceVersion version = LatestVersion) + { + Version = version switch + { + ServiceVersion.V2024_02_01_19_0 => "2024-02-01.19.0", + _ => throw new NotSupportedException() + }; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchError.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchError.Serialization.cs new file mode 100644 index 0000000000000..116c883bb17c8 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchError.Serialization.cs @@ -0,0 +1,175 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
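// Usage sketch (illustrative, not part of the generated diff): pin a client to the
// 2024-02-01.19.0 service version exposed by BatchClientOptions above. The
// BatchClient(Uri, TokenCredential, BatchClientOptions) constructor shape follows the
// standard Azure SDK convention and is an assumption here, as is the endpoint value.
using System;
using Azure.Identity;
using Azure.Compute.Batch;

class BatchClientVersionSample
{
    static void Main()
    {
        // Explicitly select the service version declared in BatchClientOptions.
        var options = new BatchClientOptions(BatchClientOptions.ServiceVersion.V2024_02_01_19_0);

        // DefaultAzureCredential resolves Entra ID credentials from the environment.
        var client = new BatchClient(
            new Uri("https://myaccount.eastus.batch.azure.com"), // hypothetical account endpoint
            new DefaultAzureCredential(),
            options);
    }
}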
+ +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchError : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchError)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("code"u8); + writer.WriteStringValue(Code); + if (Optional.IsDefined(Message)) + { + writer.WritePropertyName("message"u8); + writer.WriteObjectValue(Message, options); + } + if (Optional.IsCollectionDefined(Values)) + { + writer.WritePropertyName("values"u8); + writer.WriteStartArray(); + foreach (var item in Values) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchError IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchError)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchError(document.RootElement, options); + } + + internal static BatchError DeserializeBatchError(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string code = default; + BatchErrorMessage message = default; + IReadOnlyList values = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("code"u8)) + { + code = property.Value.GetString(); + continue; + } + if (property.NameEquals("message"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + message = BatchErrorMessage.DeserializeBatchErrorMessage(property.Value, options); + continue; + } + if (property.NameEquals("values"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(BatchErrorDetail.DeserializeBatchErrorDetail(item, options)); + } + values = array; + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchError(code, message, values ?? 
new ChangeTrackingList(), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchError)} does not support writing '{options.Format}' format."); + } + } + + BatchError IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchError(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchError)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchError FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchError(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchError.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchError.cs new file mode 100644 index 0000000000000..e1d78423d0af2 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchError.cs @@ -0,0 +1,84 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// An error response received from the Azure Batch service. + public partial class BatchError + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// An identifier for the error. Codes are invariant and are intended to be consumed programmatically. + /// is null. + internal BatchError(string code) + { + Argument.AssertNotNull(code, nameof(code)); + + Code = code; + Values = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// An identifier for the error. Codes are invariant and are intended to be consumed programmatically. + /// A message describing the error, intended to be suitable for display in a user interface. + /// A collection of key-value pairs containing additional details about the error. 
+ /// Keeps track of any properties unknown to the library. + internal BatchError(string code, BatchErrorMessage message, IReadOnlyList values, IDictionary serializedAdditionalRawData) + { + Code = code; + Message = message; + Values = values; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchError() + { + } + + /// An identifier for the error. Codes are invariant and are intended to be consumed programmatically. + public string Code { get; } + /// A message describing the error, intended to be suitable for display in a user interface. + public BatchErrorMessage Message { get; } + /// A collection of key-value pairs containing additional details about the error. + public IReadOnlyList Values { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchErrorDetail.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchErrorDetail.Serialization.cs new file mode 100644 index 0000000000000..d00a4bf014bf3 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchErrorDetail.Serialization.cs @@ -0,0 +1,149 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchErrorDetail : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchErrorDetail)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(Key)) + { + writer.WritePropertyName("key"u8); + writer.WriteStringValue(Key); + } + if (Optional.IsDefined(Value)) + { + writer.WritePropertyName("value"u8); + writer.WriteStringValue(Value); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchErrorDetail IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchErrorDetail)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchErrorDetail(document.RootElement, options); + } + + internal static BatchErrorDetail DeserializeBatchErrorDetail(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string key = default; + string value = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("key"u8)) + { + key = property.Value.GetString(); + continue; + } + if (property.NameEquals("value"u8)) + { + value = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchErrorDetail(key, value, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchErrorDetail)} does not support writing '{options.Format}' format."); + } + } + + BatchErrorDetail IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchErrorDetail(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchErrorDetail)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchErrorDetail FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchErrorDetail(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchErrorDetail.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchErrorDetail.cs new file mode 100644 index 0000000000000..693126eae63ec --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchErrorDetail.cs @@ -0,0 +1,69 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// An item of additional information included in an Azure Batch error response. 
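// Round-trip sketch (illustrative, not generated code) for the IJsonModel /
// IPersistableModel pattern implemented by BatchError and BatchErrorDetail in this
// diff: ModelReaderWriter from System.ClientModel.Primitives drives the same
// Write/Create paths shown above. The JSON payload and error code are hypothetical.
using System;
using System.ClientModel.Primitives;
using Azure.Compute.Batch;

class BatchErrorModelSample
{
    static void Main()
    {
        BinaryData json = BinaryData.FromString(
            "{\"code\":\"PoolNotFound\",\"message\":{\"lang\":\"en-US\",\"value\":\"The specified pool does not exist.\"}}");

        // Read drives the model's Create path; "J" (JSON) is the wire format these models support.
        BatchError error = ModelReaderWriter.Read<BatchError>(json);
        Console.WriteLine($"{error.Code}: {error.Message.Value}");

        // Write drives the model's Write path and yields the JSON payload back.
        BinaryData roundTripped = ModelReaderWriter.Write(error);
    }
}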
+ public partial class BatchErrorDetail + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + internal BatchErrorDetail() + { + } + + /// Initializes a new instance of . + /// An identifier specifying the meaning of the Value property. + /// The additional information included with the error response. + /// Keeps track of any properties unknown to the library. + internal BatchErrorDetail(string key, string value, IDictionary serializedAdditionalRawData) + { + Key = key; + Value = value; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// An identifier specifying the meaning of the Value property. + public string Key { get; } + /// The additional information included with the error response. + public string Value { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchErrorMessage.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchErrorMessage.Serialization.cs new file mode 100644 index 0000000000000..cb1c107a40ccc --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchErrorMessage.Serialization.cs @@ -0,0 +1,149 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchErrorMessage : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchErrorMessage)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(Lang)) + { + writer.WritePropertyName("lang"u8); + writer.WriteStringValue(Lang); + } + if (Optional.IsDefined(Value)) + { + writer.WritePropertyName("value"u8); + writer.WriteStringValue(Value); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchErrorMessage IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchErrorMessage)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchErrorMessage(document.RootElement, options); + } + + internal static BatchErrorMessage DeserializeBatchErrorMessage(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string lang = default; + string value = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("lang"u8)) + { + lang = property.Value.GetString(); + continue; + } + if (property.NameEquals("value"u8)) + { + value = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchErrorMessage(lang, value, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchErrorMessage)} does not support writing '{options.Format}' format."); + } + } + + BatchErrorMessage IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchErrorMessage(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchErrorMessage)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchErrorMessage FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchErrorMessage(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchErrorMessage.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchErrorMessage.cs new file mode 100644 index 0000000000000..ce2f513f2ff6e --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchErrorMessage.cs @@ -0,0 +1,69 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// An error message received in an Azure Batch error response. 
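// Error-handling sketch (illustrative, not generated code): at runtime the
// BatchError/BatchErrorMessage payloads modeled here typically surface through
// Azure.RequestFailedException. The failing service call is left abstract.
using System;
using Azure;

class BatchErrorHandlingSample
{
    static void Invoke(Action batchCall)
    {
        try
        {
            batchCall();
        }
        catch (RequestFailedException ex)
        {
            // ex.Status is the HTTP status code; ex.ErrorCode typically carries the
            // invariant "code" value serialized by BatchError, e.g. "PoolNotFound".
            Console.WriteLine($"Batch request failed ({ex.Status}): {ex.ErrorCode}");
        }
    }
}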
+ public partial class BatchErrorMessage + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + internal BatchErrorMessage() + { + } + + /// Initializes a new instance of . + /// The language code of the error message. + /// The text of the message. + /// Keeps track of any properties unknown to the library. + internal BatchErrorMessage(string lang, string value, IDictionary serializedAdditionalRawData) + { + Lang = lang; + Value = value; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The language code of the error message. + public string Lang { get; } + /// The text of the message. + public string Value { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJob.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJob.Serialization.cs new file mode 100644 index 0000000000000..70ba89ca2ada5 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJob.Serialization.cs @@ -0,0 +1,541 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchJob : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJob)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (options.Format != "W" && Optional.IsDefined(Id)) + { + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + } + if (options.Format != "W" && Optional.IsDefined(DisplayName)) + { + writer.WritePropertyName("displayName"u8); + writer.WriteStringValue(DisplayName); + } + if (options.Format != "W" && Optional.IsDefined(UsesTaskDependencies)) + { + writer.WritePropertyName("usesTaskDependencies"u8); + writer.WriteBooleanValue(UsesTaskDependencies.Value); + } + if (options.Format != "W" && Optional.IsDefined(Url)) + { + writer.WritePropertyName("url"u8); + writer.WriteStringValue(Url); + } + if (options.Format != "W" && Optional.IsDefined(ETag)) + { + writer.WritePropertyName("eTag"u8); + writer.WriteStringValue(ETag); + } + if (options.Format != "W" && Optional.IsDefined(LastModified)) + { + writer.WritePropertyName("lastModified"u8); + writer.WriteStringValue(LastModified.Value, "O"); + } + if (options.Format != "W" && Optional.IsDefined(CreationTime)) + { + writer.WritePropertyName("creationTime"u8); + writer.WriteStringValue(CreationTime.Value, "O"); + } + if (options.Format != "W" && Optional.IsDefined(State)) + { + writer.WritePropertyName("state"u8); + writer.WriteStringValue(State.Value.ToString()); + } + if (options.Format != "W" && Optional.IsDefined(StateTransitionTime)) + { + writer.WritePropertyName("stateTransitionTime"u8); + writer.WriteStringValue(StateTransitionTime.Value, "O"); + } + if (options.Format != "W" && Optional.IsDefined(PreviousState)) + { + writer.WritePropertyName("previousState"u8); + writer.WriteStringValue(PreviousState.Value.ToString()); + } + if (options.Format != "W" && Optional.IsDefined(PreviousStateTransitionTime)) + { + writer.WritePropertyName("previousStateTransitionTime"u8); + writer.WriteStringValue(PreviousStateTransitionTime.Value, "O"); + } + if (Optional.IsDefined(Priority)) + { + writer.WritePropertyName("priority"u8); + writer.WriteNumberValue(Priority.Value); + } + if (Optional.IsDefined(AllowTaskPreemption)) + { + writer.WritePropertyName("allowTaskPreemption"u8); + writer.WriteBooleanValue(AllowTaskPreemption.Value); + } + if (Optional.IsDefined(MaxParallelTasks)) + { + writer.WritePropertyName("maxParallelTasks"u8); + writer.WriteNumberValue(MaxParallelTasks.Value); + } + if (Optional.IsDefined(Constraints)) + { + writer.WritePropertyName("constraints"u8); + writer.WriteObjectValue(Constraints, options); + } + if (options.Format != "W" && Optional.IsDefined(JobManagerTask)) + { + writer.WritePropertyName("jobManagerTask"u8); + writer.WriteObjectValue(JobManagerTask, options); + } + if (options.Format != "W" && Optional.IsDefined(JobPreparationTask)) + { + writer.WritePropertyName("jobPreparationTask"u8); + writer.WriteObjectValue(JobPreparationTask, options); + } + if (options.Format != "W" && Optional.IsDefined(JobReleaseTask)) + { + writer.WritePropertyName("jobReleaseTask"u8); + writer.WriteObjectValue(JobReleaseTask, options); + } + if (options.Format != "W" && Optional.IsCollectionDefined(CommonEnvironmentSettings)) + { + writer.WritePropertyName("commonEnvironmentSettings"u8); + writer.WriteStartArray(); + foreach (var item in CommonEnvironmentSettings) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + writer.WritePropertyName("poolInfo"u8); + 
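// poolInfo is a required property of BatchJob (the public constructor in BatchJob.cs
// asserts it is non-null), so it is serialized unconditionally here, unlike the
// optional properties above that are guarded by Optional.IsDefined.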
writer.WriteObjectValue(PoolInfo, options); + if (Optional.IsDefined(OnAllTasksComplete)) + { + writer.WritePropertyName("onAllTasksComplete"u8); + writer.WriteStringValue(OnAllTasksComplete.Value.ToString()); + } + if (options.Format != "W" && Optional.IsDefined(OnTaskFailure)) + { + writer.WritePropertyName("onTaskFailure"u8); + writer.WriteStringValue(OnTaskFailure.Value.ToString()); + } + if (options.Format != "W" && Optional.IsDefined(NetworkConfiguration)) + { + writer.WritePropertyName("networkConfiguration"u8); + writer.WriteObjectValue(NetworkConfiguration, options); + } + if (Optional.IsCollectionDefined(Metadata)) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartArray(); + foreach (var item in Metadata) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && Optional.IsDefined(ExecutionInfo)) + { + writer.WritePropertyName("executionInfo"u8); + writer.WriteObjectValue(ExecutionInfo, options); + } + if (options.Format != "W" && Optional.IsDefined(Stats)) + { + writer.WritePropertyName("stats"u8); + writer.WriteObjectValue(Stats, options); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchJob IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJob)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchJob(document.RootElement, options); + } + + internal static BatchJob DeserializeBatchJob(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + string displayName = default; + bool? usesTaskDependencies = default; + string url = default; + string eTag = default; + DateTimeOffset? lastModified = default; + DateTimeOffset? creationTime = default; + BatchJobState? state = default; + DateTimeOffset? stateTransitionTime = default; + BatchJobState? previousState = default; + DateTimeOffset? previousStateTransitionTime = default; + int? priority = default; + bool? allowTaskPreemption = default; + int? maxParallelTasks = default; + BatchJobConstraints constraints = default; + BatchJobManagerTask jobManagerTask = default; + BatchJobPreparationTask jobPreparationTask = default; + BatchJobReleaseTask jobReleaseTask = default; + IReadOnlyList commonEnvironmentSettings = default; + BatchPoolInfo poolInfo = default; + OnAllBatchTasksComplete? onAllTasksComplete = default; + OnBatchTaskFailure? 
onTaskFailure = default; + BatchJobNetworkConfiguration networkConfiguration = default; + IList metadata = default; + BatchJobExecutionInfo executionInfo = default; + BatchJobStatistics stats = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("displayName"u8)) + { + displayName = property.Value.GetString(); + continue; + } + if (property.NameEquals("usesTaskDependencies"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + usesTaskDependencies = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("url"u8)) + { + url = property.Value.GetString(); + continue; + } + if (property.NameEquals("eTag"u8)) + { + eTag = property.Value.GetString(); + continue; + } + if (property.NameEquals("lastModified"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + lastModified = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("creationTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + creationTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("state"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + state = new BatchJobState(property.Value.GetString()); + continue; + } + if (property.NameEquals("stateTransitionTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + stateTransitionTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("previousState"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + previousState = new BatchJobState(property.Value.GetString()); + continue; + } + if (property.NameEquals("previousStateTransitionTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + previousStateTransitionTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("priority"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + priority = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("allowTaskPreemption"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + allowTaskPreemption = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("maxParallelTasks"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + maxParallelTasks = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("constraints"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + constraints = BatchJobConstraints.DeserializeBatchJobConstraints(property.Value, options); + continue; + } + if (property.NameEquals("jobManagerTask"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + jobManagerTask = BatchJobManagerTask.DeserializeBatchJobManagerTask(property.Value, options); + continue; + } + if (property.NameEquals("jobPreparationTask"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + jobPreparationTask = BatchJobPreparationTask.DeserializeBatchJobPreparationTask(property.Value, options); + continue; + } + if (property.NameEquals("jobReleaseTask"u8)) + { + if (property.Value.ValueKind == 
JsonValueKind.Null) + { + continue; + } + jobReleaseTask = BatchJobReleaseTask.DeserializeBatchJobReleaseTask(property.Value, options); + continue; + } + if (property.NameEquals("commonEnvironmentSettings"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(EnvironmentSetting.DeserializeEnvironmentSetting(item, options)); + } + commonEnvironmentSettings = array; + continue; + } + if (property.NameEquals("poolInfo"u8)) + { + poolInfo = BatchPoolInfo.DeserializeBatchPoolInfo(property.Value, options); + continue; + } + if (property.NameEquals("onAllTasksComplete"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + onAllTasksComplete = new OnAllBatchTasksComplete(property.Value.GetString()); + continue; + } + if (property.NameEquals("onTaskFailure"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + onTaskFailure = new OnBatchTaskFailure(property.Value.GetString()); + continue; + } + if (property.NameEquals("networkConfiguration"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + networkConfiguration = BatchJobNetworkConfiguration.DeserializeBatchJobNetworkConfiguration(property.Value, options); + continue; + } + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(MetadataItem.DeserializeMetadataItem(item, options)); + } + metadata = array; + continue; + } + if (property.NameEquals("executionInfo"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + executionInfo = BatchJobExecutionInfo.DeserializeBatchJobExecutionInfo(property.Value, options); + continue; + } + if (property.NameEquals("stats"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + stats = BatchJobStatistics.DeserializeBatchJobStatistics(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchJob( + id, + displayName, + usesTaskDependencies, + url, + eTag, + lastModified, + creationTime, + state, + stateTransitionTime, + previousState, + previousStateTransitionTime, + priority, + allowTaskPreemption, + maxParallelTasks, + constraints, + jobManagerTask, + jobPreparationTask, + jobReleaseTask, + commonEnvironmentSettings ?? new ChangeTrackingList(), + poolInfo, + onAllTasksComplete, + onTaskFailure, + networkConfiguration, + metadata ?? new ChangeTrackingList(), + executionInfo, + stats, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchJob)} does not support writing '{options.Format}' format."); + } + } + + BatchJob IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel<BatchJob>)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchJob(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchJob)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel<BatchJob>.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchJob FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchJob(document.RootElement); + } + + /// Convert into a <see cref="RequestContent"/>. + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJob.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJob.cs new file mode 100644 index 0000000000000..b516fefc12923 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJob.cs @@ -0,0 +1,177 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// <auto-generated/> + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// An Azure Batch Job. + public partial class BatchJob + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use <see cref="BinaryData.FromObjectAsJson{T}(T, System.Text.Json.JsonSerializerOptions)"/>. + /// + /// + /// To assign an already formatted json string to this property use <see cref="BinaryData.FromString(string)"/>. + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary<string, BinaryData> _serializedAdditionalRawData; + + /// Initializes a new instance of <see cref="BatchJob"/>. + /// The Pool settings associated with the Job. + /// <paramref name="poolInfo"/> is null. + public BatchJob(BatchPoolInfo poolInfo) + { + Argument.AssertNotNull(poolInfo, nameof(poolInfo)); + + CommonEnvironmentSettings = new ChangeTrackingList<EnvironmentSetting>(); + PoolInfo = poolInfo; + Metadata = new ChangeTrackingList<MetadataItem>(); + } + + /// Initializes a new instance of <see cref="BatchJob"/>. + /// A string that uniquely identifies the Job within the Account. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). + /// The display name for the Job. + /// Whether Tasks in the Job can define dependencies on each other. The default is false. + /// The URL of the Job. + /// The ETag of the Job. This is an opaque string. You can use it to detect whether the Job has changed between requests. In particular, you can pass the ETag when updating a Job to specify that your changes should take effect only if nobody else has modified the Job in the meantime. + /// The last modified time of the Job. This is the last time at which the Job level data, such as the Job state or priority, changed. It does not factor in task-level changes such as adding new Tasks or Tasks changing state. + /// The creation time of the Job. 
+ /// The current state of the Job. + /// The time at which the Job entered its current state. + /// The previous state of the Job. This property is not set if the Job is in its initial Active state. + /// The time at which the Job entered its previous state. This property is not set if the Job is in its initial Active state. + /// The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. + /// Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able to requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. + /// The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. + /// The execution constraints for the Job. + /// Details of a Job Manager Task to be launched when the Job is started. + /// The Job Preparation Task. The Job Preparation Task is a special Task run on each Compute Node before any other Task of the Job. + /// The Job Release Task. The Job Release Task is a special Task run at the end of the Job on each Compute Node that has run any other Task of the Job. + /// The list of common environment variable settings. These environment variables are set for all Tasks in the Job (including the Job Manager, Job Preparation and Job Release Tasks). Individual Tasks can override an environment setting specified here by specifying the same setting name with a different value. + /// The Pool settings associated with the Job. + /// The action the Batch service should take when all Tasks in the Job are in the completed state. The default is noaction. + /// The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if it has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. + /// The network configuration for the Job. + /// A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. + /// The execution information for the Job. + /// Resource usage statistics for the entire lifetime of the Job. This property is populated only if the CloudJob was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. + /// Keeps track of any properties unknown to the library. + internal BatchJob(string id, string displayName, bool? usesTaskDependencies, string url, string eTag, DateTimeOffset? lastModified, DateTimeOffset? creationTime, BatchJobState? state, DateTimeOffset? stateTransitionTime, BatchJobState? previousState, DateTimeOffset? previousStateTransitionTime, int? priority, bool? allowTaskPreemption, int? 
maxParallelTasks, BatchJobConstraints constraints, BatchJobManagerTask jobManagerTask, BatchJobPreparationTask jobPreparationTask, BatchJobReleaseTask jobReleaseTask, IReadOnlyList<EnvironmentSetting> commonEnvironmentSettings, BatchPoolInfo poolInfo, OnAllBatchTasksComplete? onAllTasksComplete, OnBatchTaskFailure? onTaskFailure, BatchJobNetworkConfiguration networkConfiguration, IList<MetadataItem> metadata, BatchJobExecutionInfo executionInfo, BatchJobStatistics stats, IDictionary<string, BinaryData> serializedAdditionalRawData) + { + Id = id; + DisplayName = displayName; + UsesTaskDependencies = usesTaskDependencies; + Url = url; + ETag = eTag; + LastModified = lastModified; + CreationTime = creationTime; + State = state; + StateTransitionTime = stateTransitionTime; + PreviousState = previousState; + PreviousStateTransitionTime = previousStateTransitionTime; + Priority = priority; + AllowTaskPreemption = allowTaskPreemption; + MaxParallelTasks = maxParallelTasks; + Constraints = constraints; + JobManagerTask = jobManagerTask; + JobPreparationTask = jobPreparationTask; + JobReleaseTask = jobReleaseTask; + CommonEnvironmentSettings = commonEnvironmentSettings; + PoolInfo = poolInfo; + OnAllTasksComplete = onAllTasksComplete; + OnTaskFailure = onTaskFailure; + NetworkConfiguration = networkConfiguration; + Metadata = metadata; + ExecutionInfo = executionInfo; + Stats = stats; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of <see cref="BatchJob"/> for deserialization. + internal BatchJob() + { + } + + /// A string that uniquely identifies the Job within the Account. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). + public string Id { get; } + /// The display name for the Job. + public string DisplayName { get; } + /// Whether Tasks in the Job can define dependencies on each other. The default is false. + public bool? UsesTaskDependencies { get; } + /// The URL of the Job. + public string Url { get; } + /// The ETag of the Job. This is an opaque string. You can use it to detect whether the Job has changed between requests. In particular, you can pass the ETag when updating a Job to specify that your changes should take effect only if nobody else has modified the Job in the meantime. + public string ETag { get; } + /// The last modified time of the Job. This is the last time at which the Job level data, such as the Job state or priority, changed. It does not factor in task-level changes such as adding new Tasks or Tasks changing state. + public DateTimeOffset? LastModified { get; } + /// The creation time of the Job. + public DateTimeOffset? CreationTime { get; } + /// The current state of the Job. + public BatchJobState? State { get; } + /// The time at which the Job entered its current state. + public DateTimeOffset? StateTransitionTime { get; } + /// The previous state of the Job. This property is not set if the Job is in its initial Active state. + public BatchJobState? PreviousState { get; } + /// The time at which the Job entered its previous state. This property is not set if the Job is in its initial Active state. + public DateTimeOffset? PreviousStateTransitionTime { get; } + /// The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. + public int? Priority { get; set; } + /// Whether Tasks in this job can be preempted by other high priority jobs. 
If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able to requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. + public bool? AllowTaskPreemption { get; set; } + /// The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. + public int? MaxParallelTasks { get; set; } + /// The execution constraints for the Job. + public BatchJobConstraints Constraints { get; set; } + /// Details of a Job Manager Task to be launched when the Job is started. + public BatchJobManagerTask JobManagerTask { get; } + /// The Job Preparation Task. The Job Preparation Task is a special Task run on each Compute Node before any other Task of the Job. + public BatchJobPreparationTask JobPreparationTask { get; } + /// The Job Release Task. The Job Release Task is a special Task run at the end of the Job on each Compute Node that has run any other Task of the Job. + public BatchJobReleaseTask JobReleaseTask { get; } + /// The list of common environment variable settings. These environment variables are set for all Tasks in the Job (including the Job Manager, Job Preparation and Job Release Tasks). Individual Tasks can override an environment setting specified here by specifying the same setting name with a different value. + public IReadOnlyList<EnvironmentSetting> CommonEnvironmentSettings { get; } + /// The Pool settings associated with the Job. + public BatchPoolInfo PoolInfo { get; set; } + /// The action the Batch service should take when all Tasks in the Job are in the completed state. The default is noaction. + public OnAllBatchTasksComplete? OnAllTasksComplete { get; set; } + /// The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if it has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. + public OnBatchTaskFailure? OnTaskFailure { get; } + /// The network configuration for the Job. + public BatchJobNetworkConfiguration NetworkConfiguration { get; } + /// A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. + public IList<MetadataItem> Metadata { get; } + /// The execution information for the Job. + public BatchJobExecutionInfo ExecutionInfo { get; } + /// Resource usage statistics for the entire lifetime of the Job. This property is populated only if the CloudJob was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. + public BatchJobStatistics Stats { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobAction.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobAction.cs new file mode 100644 index 0000000000000..2b5d5b7efee00 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobAction.cs @@ -0,0 +1,54 @@ +// Copyright (c) Microsoft Corporation. 
+// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// <auto-generated/> + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// BatchJobAction enums. + public readonly partial struct BatchJobAction : IEquatable<BatchJobAction> + { + private readonly string _value; + + /// Initializes a new instance of <see cref="BatchJobAction"/>. + /// <exception cref="ArgumentNullException"><paramref name="value"/> is null.</exception> + public BatchJobAction(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string NoneValue = "none"; + private const string DisableValue = "disable"; + private const string TerminateValue = "terminate"; + + /// Take no action. + public static BatchJobAction None { get; } = new BatchJobAction(NoneValue); + /// Disable the Job. This is equivalent to calling the disable Job API, with a disableTasks value of requeue. + public static BatchJobAction Disable { get; } = new BatchJobAction(DisableValue); + /// Terminate the Job. The terminationReason in the Job's executionInfo is set to "TaskFailed". + public static BatchJobAction Terminate { get; } = new BatchJobAction(TerminateValue); + /// Determines if two <see cref="BatchJobAction"/> values are the same. + public static bool operator ==(BatchJobAction left, BatchJobAction right) => left.Equals(right); + /// Determines if two <see cref="BatchJobAction"/> values are not the same. + public static bool operator !=(BatchJobAction left, BatchJobAction right) => !left.Equals(right); + /// Converts a string to a <see cref="BatchJobAction"/>. + public static implicit operator BatchJobAction(string value) => new BatchJobAction(value); + + /// <inheritdoc /> + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is BatchJobAction other && Equals(other); + /// <inheritdoc /> + public bool Equals(BatchJobAction other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// <inheritdoc /> + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// <inheritdoc /> + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobConstraints.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobConstraints.Serialization.cs new file mode 100644 index 0000000000000..bfe4dceb06631 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobConstraints.Serialization.cs @@ -0,0 +1,157 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// <auto-generated/> + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchJobConstraints : IUtf8JsonSerializable, IJsonModel<BatchJobConstraints> + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel<BatchJobConstraints>)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel<BatchJobConstraints>.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobConstraints)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(MaxWallClockTime)) + { + writer.WritePropertyName("maxWallClockTime"u8); + writer.WriteStringValue(MaxWallClockTime.Value, "P"); + } + if (Optional.IsDefined(MaxTaskRetryCount)) + { + writer.WritePropertyName("maxTaskRetryCount"u8); + writer.WriteNumberValue(MaxTaskRetryCount.Value); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchJobConstraints IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobConstraints)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchJobConstraints(document.RootElement, options); + } + + internal static BatchJobConstraints DeserializeBatchJobConstraints(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + TimeSpan? maxWallClockTime = default; + int? maxTaskRetryCount = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("maxWallClockTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + maxWallClockTime = property.Value.GetTimeSpan("P"); + continue; + } + if (property.NameEquals("maxTaskRetryCount"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + maxTaskRetryCount = property.Value.GetInt32(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchJobConstraints(maxWallClockTime, maxTaskRetryCount, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchJobConstraints)} does not support writing '{options.Format}' format."); + } + } + + BatchJobConstraints IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchJobConstraints(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchJobConstraints)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchJobConstraints FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchJobConstraints(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobConstraints.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobConstraints.cs new file mode 100644 index 0000000000000..78592b83c7535 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobConstraints.cs @@ -0,0 +1,69 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// The execution constraints for a Job. + public partial class BatchJobConstraints + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public BatchJobConstraints() + { + } + + /// Initializes a new instance of . + /// The maximum elapsed time that the Job may run, measured from the time the Job is created. If the Job does not complete within the time limit, the Batch service terminates it and any Tasks that are still running. In this case, the termination reason will be MaxWallClockTimeExpiry. If this property is not specified, there is no time limit on how long the Job may run. + /// The maximum number of times each Task may be retried. The Batch service retries a Task if its exit code is nonzero. Note that this value specifically controls the number of retries. The Batch service will try each Task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries a Task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry Tasks. If the maximum retry count is -1, the Batch service retries Tasks without limit. The default value is 0 (no retries). + /// Keeps track of any properties unknown to the library. + internal BatchJobConstraints(TimeSpan? maxWallClockTime, int? 
maxTaskRetryCount, IDictionary<string, BinaryData> serializedAdditionalRawData) + { + MaxWallClockTime = maxWallClockTime; + MaxTaskRetryCount = maxTaskRetryCount; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The maximum elapsed time that the Job may run, measured from the time the Job is created. If the Job does not complete within the time limit, the Batch service terminates it and any Tasks that are still running. In this case, the termination reason will be MaxWallClockTimeExpiry. If this property is not specified, there is no time limit on how long the Job may run. + public TimeSpan? MaxWallClockTime { get; set; } + /// The maximum number of times each Task may be retried. The Batch service retries a Task if its exit code is nonzero. Note that this value specifically controls the number of retries. The Batch service will try each Task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries a Task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry Tasks. If the maximum retry count is -1, the Batch service retries Tasks without limit. The default value is 0 (no retries). + public int? MaxTaskRetryCount { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobCreateContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobCreateContent.Serialization.cs new file mode 100644 index 0000000000000..b091245b29d55 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobCreateContent.Serialization.cs @@ -0,0 +1,386 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// <auto-generated/> + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchJobCreateContent : IUtf8JsonSerializable, IJsonModel<BatchJobCreateContent> + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel<BatchJobCreateContent>)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel<BatchJobCreateContent>.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobCreateContent)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + if (Optional.IsDefined(DisplayName)) + { + writer.WritePropertyName("displayName"u8); + writer.WriteStringValue(DisplayName); + } + if (Optional.IsDefined(UsesTaskDependencies)) + { + writer.WritePropertyName("usesTaskDependencies"u8); + writer.WriteBooleanValue(UsesTaskDependencies.Value); + } + if (Optional.IsDefined(Priority)) + { + writer.WritePropertyName("priority"u8); + writer.WriteNumberValue(Priority.Value); + } + if (Optional.IsDefined(AllowTaskPreemption)) + { + writer.WritePropertyName("allowTaskPreemption"u8); + writer.WriteBooleanValue(AllowTaskPreemption.Value); + } + if (Optional.IsDefined(MaxParallelTasks)) + { + writer.WritePropertyName("maxParallelTasks"u8); + writer.WriteNumberValue(MaxParallelTasks.Value); + } + if (Optional.IsDefined(Constraints)) + { + writer.WritePropertyName("constraints"u8); + writer.WriteObjectValue(Constraints, options); + } + if (Optional.IsDefined(JobManagerTask)) + { + writer.WritePropertyName("jobManagerTask"u8); + writer.WriteObjectValue(JobManagerTask, options); + } + if (Optional.IsDefined(JobPreparationTask)) + { + writer.WritePropertyName("jobPreparationTask"u8); + writer.WriteObjectValue(JobPreparationTask, options); + } + if (Optional.IsDefined(JobReleaseTask)) + { + writer.WritePropertyName("jobReleaseTask"u8); + writer.WriteObjectValue(JobReleaseTask, options); + } + if (Optional.IsCollectionDefined(CommonEnvironmentSettings)) + { + writer.WritePropertyName("commonEnvironmentSettings"u8); + writer.WriteStartArray(); + foreach (var item in CommonEnvironmentSettings) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + writer.WritePropertyName("poolInfo"u8); + writer.WriteObjectValue(PoolInfo, options); + if (Optional.IsDefined(OnAllTasksComplete)) + { + writer.WritePropertyName("onAllTasksComplete"u8); + writer.WriteStringValue(OnAllTasksComplete.Value.ToString()); + } + if (Optional.IsDefined(OnTaskFailure)) + { + writer.WritePropertyName("onTaskFailure"u8); + writer.WriteStringValue(OnTaskFailure.Value.ToString()); + } + if (Optional.IsDefined(NetworkConfiguration)) + { + writer.WritePropertyName("networkConfiguration"u8); + writer.WriteObjectValue(NetworkConfiguration, options); + } + if (Optional.IsCollectionDefined(Metadata)) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartArray(); + foreach (var item in Metadata) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchJobCreateContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobCreateContent)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchJobCreateContent(document.RootElement, options); + } + + internal static BatchJobCreateContent DeserializeBatchJobCreateContent(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + string displayName = default; + bool? usesTaskDependencies = default; + int? priority = default; + bool? allowTaskPreemption = default; + int? maxParallelTasks = default; + BatchJobConstraints constraints = default; + BatchJobManagerTask jobManagerTask = default; + BatchJobPreparationTask jobPreparationTask = default; + BatchJobReleaseTask jobReleaseTask = default; + IList commonEnvironmentSettings = default; + BatchPoolInfo poolInfo = default; + OnAllBatchTasksComplete? onAllTasksComplete = default; + OnBatchTaskFailure? onTaskFailure = default; + BatchJobNetworkConfiguration networkConfiguration = default; + IList metadata = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("displayName"u8)) + { + displayName = property.Value.GetString(); + continue; + } + if (property.NameEquals("usesTaskDependencies"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + usesTaskDependencies = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("priority"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + priority = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("allowTaskPreemption"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + allowTaskPreemption = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("maxParallelTasks"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + maxParallelTasks = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("constraints"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + constraints = BatchJobConstraints.DeserializeBatchJobConstraints(property.Value, options); + continue; + } + if (property.NameEquals("jobManagerTask"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + jobManagerTask = BatchJobManagerTask.DeserializeBatchJobManagerTask(property.Value, options); + continue; + } + if (property.NameEquals("jobPreparationTask"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + jobPreparationTask = BatchJobPreparationTask.DeserializeBatchJobPreparationTask(property.Value, options); + continue; + } + if (property.NameEquals("jobReleaseTask"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + jobReleaseTask = BatchJobReleaseTask.DeserializeBatchJobReleaseTask(property.Value, options); + continue; + } + if (property.NameEquals("commonEnvironmentSettings"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new 
List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(EnvironmentSetting.DeserializeEnvironmentSetting(item, options)); + } + commonEnvironmentSettings = array; + continue; + } + if (property.NameEquals("poolInfo"u8)) + { + poolInfo = BatchPoolInfo.DeserializeBatchPoolInfo(property.Value, options); + continue; + } + if (property.NameEquals("onAllTasksComplete"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + onAllTasksComplete = new OnAllBatchTasksComplete(property.Value.GetString()); + continue; + } + if (property.NameEquals("onTaskFailure"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + onTaskFailure = new OnBatchTaskFailure(property.Value.GetString()); + continue; + } + if (property.NameEquals("networkConfiguration"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + networkConfiguration = BatchJobNetworkConfiguration.DeserializeBatchJobNetworkConfiguration(property.Value, options); + continue; + } + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(MetadataItem.DeserializeMetadataItem(item, options)); + } + metadata = array; + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchJobCreateContent( + id, + displayName, + usesTaskDependencies, + priority, + allowTaskPreemption, + maxParallelTasks, + constraints, + jobManagerTask, + jobPreparationTask, + jobReleaseTask, + commonEnvironmentSettings ?? new ChangeTrackingList(), + poolInfo, + onAllTasksComplete, + onTaskFailure, + networkConfiguration, + metadata ?? new ChangeTrackingList(), + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchJobCreateContent)} does not support writing '{options.Format}' format."); + } + } + + BatchJobCreateContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchJobCreateContent(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchJobCreateContent)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchJobCreateContent FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchJobCreateContent(document.RootElement); + } + + /// Convert into a . 
+ internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobCreateContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobCreateContent.cs new file mode 100644 index 0000000000000..ea1cd55c27ed0 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobCreateContent.cs @@ -0,0 +1,140 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// <auto-generated/> + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Parameters for creating an Azure Batch Job. + public partial class BatchJobCreateContent + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary<string, BinaryData> _serializedAdditionalRawData; + + /// Initializes a new instance of <see cref="BatchJobCreateContent"/>. + /// A string that uniquely identifies the Job within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). + /// The Pool on which the Batch service runs the Job's Tasks. + /// <exception cref="ArgumentNullException"><paramref name="id"/> or <paramref name="poolInfo"/> is null.</exception> + public BatchJobCreateContent(string id, BatchPoolInfo poolInfo) + { + Argument.AssertNotNull(id, nameof(id)); + Argument.AssertNotNull(poolInfo, nameof(poolInfo)); + + Id = id; + CommonEnvironmentSettings = new ChangeTrackingList<EnvironmentSetting>(); + PoolInfo = poolInfo; + Metadata = new ChangeTrackingList<MetadataItem>(); + } + + /// Initializes a new instance of <see cref="BatchJobCreateContent"/>. + /// A string that uniquely identifies the Job within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). + /// The display name for the Job. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. + /// Whether Tasks in the Job can define dependencies on each other. The default is false. + /// The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. + /// Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able to requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. + /// The maximum number of tasks that can be executed in parallel for the job. 
The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. + /// The execution constraints for the Job. + /// Details of a Job Manager Task to be launched when the Job is started. If the Job does not specify a Job Manager Task, the user must explicitly add Tasks to the Job. If the Job does specify a Job Manager Task, the Batch service creates the Job Manager Task when the Job is created, and will try to schedule the Job Manager Task before scheduling other Tasks in the Job. The Job Manager Task's typical purpose is to control and/or monitor Job execution, for example by deciding what additional Tasks to run, determining when the work is complete, etc. (However, a Job Manager Task is not restricted to these activities - it is a fully-fledged Task in the system and can perform whatever actions are required for the Job.) For example, a Job Manager Task might download a file specified as a parameter, analyze the contents of that file and submit additional Tasks based on those contents. + /// The Job Preparation Task. If a Job has a Job Preparation Task, the Batch service will run the Job Preparation Task on a Node before starting any Tasks of that Job on that Compute Node. + /// The Job Release Task. A Job Release Task cannot be specified without also specifying a Job Preparation Task for the Job. The Batch service runs the Job Release Task on the Nodes that have run the Job Preparation Task. The primary purpose of the Job Release Task is to undo changes to Compute Nodes made by the Job Preparation Task. Example activities include deleting local files, or shutting down services that were started as part of Job preparation. + /// The list of common environment variable settings. These environment variables are set for all Tasks in the Job (including the Job Manager, Job Preparation and Job Release Tasks). Individual Tasks can override an environment setting specified here by specifying the same setting name with a different value. + /// The Pool on which the Batch service runs the Job's Tasks. + /// The action the Batch service should take when all Tasks in the Job are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. + /// The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if it has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. + /// The network configuration for the Job. + /// A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. + /// Keeps track of any properties unknown to the library. + internal BatchJobCreateContent(string id, string displayName, bool? usesTaskDependencies, int? priority, bool? allowTaskPreemption, int? 
maxParallelTasks, BatchJobConstraints constraints, BatchJobManagerTask jobManagerTask, BatchJobPreparationTask jobPreparationTask, BatchJobReleaseTask jobReleaseTask, IList<EnvironmentSetting> commonEnvironmentSettings, BatchPoolInfo poolInfo, OnAllBatchTasksComplete? onAllTasksComplete, OnBatchTaskFailure? onTaskFailure, BatchJobNetworkConfiguration networkConfiguration, IList<MetadataItem> metadata, IDictionary<string, BinaryData> serializedAdditionalRawData) + { + Id = id; + DisplayName = displayName; + UsesTaskDependencies = usesTaskDependencies; + Priority = priority; + AllowTaskPreemption = allowTaskPreemption; + MaxParallelTasks = maxParallelTasks; + Constraints = constraints; + JobManagerTask = jobManagerTask; + JobPreparationTask = jobPreparationTask; + JobReleaseTask = jobReleaseTask; + CommonEnvironmentSettings = commonEnvironmentSettings; + PoolInfo = poolInfo; + OnAllTasksComplete = onAllTasksComplete; + OnTaskFailure = onTaskFailure; + NetworkConfiguration = networkConfiguration; + Metadata = metadata; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of <see cref="BatchJobCreateContent"/> for deserialization. + internal BatchJobCreateContent() + { + } + + /// A string that uniquely identifies the Job within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). + public string Id { get; } + /// The display name for the Job. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. + public string DisplayName { get; set; } + /// Whether Tasks in the Job can define dependencies on each other. The default is false. + public bool? UsesTaskDependencies { get; set; } + /// The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. + public int? Priority { get; set; } + /// Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able to requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. + public bool? AllowTaskPreemption { get; set; } + /// The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. + public int? MaxParallelTasks { get; set; } + /// The execution constraints for the Job. + public BatchJobConstraints Constraints { get; set; } + /// Details of a Job Manager Task to be launched when the Job is started. If the Job does not specify a Job Manager Task, the user must explicitly add Tasks to the Job. If the Job does specify a Job Manager Task, the Batch service creates the Job Manager Task when the Job is created, and will try to schedule the Job Manager Task before scheduling other Tasks in the Job. The Job Manager Task's typical purpose is to control and/or monitor Job execution, for example by deciding what additional Tasks to run, determining when the work is complete, etc. 
(However, a Job Manager Task is not restricted to these activities - it is a fully-fledged Task in the system and can perform whatever actions are required for the Job.) For example, a Job Manager Task might download a file specified as a parameter, analyze the contents of that file and submit additional Tasks based on those contents. + public BatchJobManagerTask JobManagerTask { get; set; } + /// The Job Preparation Task. If a Job has a Job Preparation Task, the Batch service will run the Job Preparation Task on a Node before starting any Tasks of that Job on that Compute Node. + public BatchJobPreparationTask JobPreparationTask { get; set; } + /// The Job Release Task. A Job Release Task cannot be specified without also specifying a Job Preparation Task for the Job. The Batch service runs the Job Release Task on the Nodes that have run the Job Preparation Task. The primary purpose of the Job Release Task is to undo changes to Compute Nodes made by the Job Preparation Task. Example activities include deleting local files, or shutting down services that were started as part of Job preparation. + public BatchJobReleaseTask JobReleaseTask { get; set; } + /// The list of common environment variable settings. These environment variables are set for all Tasks in the Job (including the Job Manager, Job Preparation and Job Release Tasks). Individual Tasks can override an environment setting specified here by specifying the same setting name with a different value. + public IList<EnvironmentSetting> CommonEnvironmentSettings { get; } + /// The Pool on which the Batch service runs the Job's Tasks. + public BatchPoolInfo PoolInfo { get; } + /// The action the Batch service should take when all Tasks in the Job are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. + public OnAllBatchTasksComplete? OnAllTasksComplete { get; set; } + /// The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if it has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. + public OnBatchTaskFailure? OnTaskFailure { get; set; } + /// The network configuration for the Job. + public BatchJobNetworkConfiguration NetworkConfiguration { get; set; } + /// A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. + public IList<MetadataItem> Metadata { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobDisableContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobDisableContent.Serialization.cs new file mode 100644 index 0000000000000..c9728fe091e73 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobDisableContent.Serialization.cs @@ -0,0 +1,135 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
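// Editor's note: a minimal usage sketch tying together the job-creation models above; it is
// not part of the generated source. The settable BatchPoolInfo.PoolId property and the
// batchClient.CreateJob call are assumptions about the client surface, shown only to
// illustrate how BatchJobCreateContent is typically populated.
var job = new BatchJobCreateContent("my-job", new BatchPoolInfo { PoolId = "my-pool" })
{
    Priority = 100,
    Constraints = new BatchJobConstraints
    {
        MaxWallClockTime = TimeSpan.FromHours(2), // job is terminated if still running after 2 hours
        MaxTaskRetryCount = 3                     // 1 initial try + up to 3 retries = up to 4 attempts per task
    },
    // Per the docs above: start with "noaction" and switch to "terminatejob" once all
    // tasks have been added, to avoid terminating a job that has no tasks yet.
    OnAllTasksComplete = new OnAllBatchTasksComplete("noaction")
};
// batchClient.CreateJob(job); // hypothetical call on a configured client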
+ +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchJobDisableContent : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobDisableContent)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("disableTasks"u8); + writer.WriteStringValue(DisableTasks.ToString()); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchJobDisableContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobDisableContent)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchJobDisableContent(document.RootElement, options); + } + + internal static BatchJobDisableContent DeserializeBatchJobDisableContent(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + DisableBatchJobOption disableTasks = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("disableTasks"u8)) + { + disableTasks = new DisableBatchJobOption(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchJobDisableContent(disableTasks, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchJobDisableContent)} does not support writing '{options.Format}' format."); + } + } + + BatchJobDisableContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchJobDisableContent(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchJobDisableContent)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchJobDisableContent FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchJobDisableContent(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobDisableContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobDisableContent.cs new file mode 100644 index 0000000000000..3ad54193906b3 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobDisableContent.cs @@ -0,0 +1,72 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Parameters for disabling an Azure Batch Job. + public partial class BatchJobDisableContent + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// What to do with active Tasks associated with the Job. + public BatchJobDisableContent(DisableBatchJobOption disableTasks) + { + DisableTasks = disableTasks; + } + + /// Initializes a new instance of . + /// What to do with active Tasks associated with the Job. + /// Keeps track of any properties unknown to the library. + internal BatchJobDisableContent(DisableBatchJobOption disableTasks, IDictionary serializedAdditionalRawData) + { + DisableTasks = disableTasks; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchJobDisableContent() + { + } + + /// What to do with active Tasks associated with the Job. 
+ public DisableBatchJobOption DisableTasks { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobExecutionInfo.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobExecutionInfo.Serialization.cs new file mode 100644 index 0000000000000..474ef422c50ed --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobExecutionInfo.Serialization.cs @@ -0,0 +1,193 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchJobExecutionInfo : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobExecutionInfo)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("startTime"u8); + writer.WriteStringValue(StartTime, "O"); + if (Optional.IsDefined(EndTime)) + { + writer.WritePropertyName("endTime"u8); + writer.WriteStringValue(EndTime.Value, "O"); + } + if (Optional.IsDefined(PoolId)) + { + writer.WritePropertyName("poolId"u8); + writer.WriteStringValue(PoolId); + } + if (Optional.IsDefined(SchedulingError)) + { + writer.WritePropertyName("schedulingError"u8); + writer.WriteObjectValue(SchedulingError, options); + } + if (Optional.IsDefined(TerminationReason)) + { + writer.WritePropertyName("terminateReason"u8); + writer.WriteStringValue(TerminationReason); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchJobExecutionInfo IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobExecutionInfo)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchJobExecutionInfo(document.RootElement, options); + } + + internal static BatchJobExecutionInfo DeserializeBatchJobExecutionInfo(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + DateTimeOffset startTime = default; + DateTimeOffset? 
endTime = default; + string poolId = default; + BatchJobSchedulingError schedulingError = default; + string terminateReason = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("startTime"u8)) + { + startTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("endTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + endTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("poolId"u8)) + { + poolId = property.Value.GetString(); + continue; + } + if (property.NameEquals("schedulingError"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + schedulingError = BatchJobSchedulingError.DeserializeBatchJobSchedulingError(property.Value, options); + continue; + } + if (property.NameEquals("terminateReason"u8)) + { + terminateReason = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchJobExecutionInfo( + startTime, + endTime, + poolId, + schedulingError, + terminateReason, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchJobExecutionInfo)} does not support writing '{options.Format}' format."); + } + } + + BatchJobExecutionInfo IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchJobExecutionInfo(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchJobExecutionInfo)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchJobExecutionInfo FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchJobExecutionInfo(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobExecutionInfo.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobExecutionInfo.cs new file mode 100644 index 0000000000000..331650937665c --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobExecutionInfo.cs @@ -0,0 +1,88 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
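// Editor's note: a short usage sketch for the disable model defined above; not part of the
// generated source. The "requeue" wire value is taken from the docs earlier in this diff
// (disable Job API, disableTasks value of requeue); batchClient.DisableJob is a hypothetical
// client method shown for context only.
var disableContent = new BatchJobDisableContent(new DisableBatchJobOption("requeue"));
// batchClient.DisableJob("my-job", disableContent); // hypothetical call
// Active tasks are requeued; the job can later be re-enabled to resume scheduling.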
+ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Contains information about the execution of a Job in the Azure Batch service. + public partial class BatchJobExecutionInfo + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The start time of the Job. This is the time at which the Job was created. + public BatchJobExecutionInfo(DateTimeOffset startTime) + { + StartTime = startTime; + } + + /// Initializes a new instance of . + /// The start time of the Job. This is the time at which the Job was created. + /// The completion time of the Job. This property is set only if the Job is in the completed state. + /// The ID of the Pool to which this Job is assigned. This element contains the actual Pool where the Job is assigned. When you get Job details from the service, they also contain a poolInfo element, which contains the Pool configuration data from when the Job was added or updated. That poolInfo element may also contain a poolId element. If it does, the two IDs are the same. If it does not, it means the Job ran on an auto Pool, and this property contains the ID of that auto Pool. + /// Details of any error encountered by the service in starting the Job. This property is not set if there was no error starting the Job. + /// A string describing the reason the Job ended. This property is set only if the Job is in the completed state. If the Batch service terminates the Job, it sets the reason as follows: JMComplete - the Job Manager Task completed, and killJobOnCompletion was set to true. MaxWallClockTimeExpiry - the Job reached its maxWallClockTime constraint. TerminateJobSchedule - the Job ran as part of a schedule, and the schedule terminated. AllTasksComplete - the Job's onAllTasksComplete attribute is set to terminatejob, and all Tasks in the Job are complete. TaskFailed - the Job's onTaskFailure attribute is set to performExitOptionsJobAction, and a Task in the Job failed with an exit condition that specified a jobAction of terminatejob. Any other string is a user-defined reason specified in a call to the 'Terminate a Job' operation. + /// Keeps track of any properties unknown to the library. + internal BatchJobExecutionInfo(DateTimeOffset startTime, DateTimeOffset? endTime, string poolId, BatchJobSchedulingError schedulingError, string terminationReason, IDictionary serializedAdditionalRawData) + { + StartTime = startTime; + EndTime = endTime; + PoolId = poolId; + SchedulingError = schedulingError; + TerminationReason = terminationReason; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchJobExecutionInfo() + { + } + + /// The start time of the Job. This is the time at which the Job was created. 
+        public DateTimeOffset StartTime { get; set; }
+        /// The completion time of the Job. This property is set only if the Job is in the completed state.
+        public DateTimeOffset? EndTime { get; set; }
+        /// The ID of the Pool to which this Job is assigned. This element contains the actual Pool where the Job is assigned. When you get Job details from the service, they also contain a poolInfo element, which contains the Pool configuration data from when the Job was added or updated. That poolInfo element may also contain a poolId element. If it does, the two IDs are the same. If it does not, it means the Job ran on an auto Pool, and this property contains the ID of that auto Pool.
+        public string PoolId { get; set; }
+        /// Details of any error encountered by the service in starting the Job. This property is not set if there was no error starting the Job.
+        public BatchJobSchedulingError SchedulingError { get; set; }
+        /// A string describing the reason the Job ended. This property is set only if the Job is in the completed state. If the Batch service terminates the Job, it sets the reason as follows: JMComplete - the Job Manager Task completed, and killJobOnCompletion was set to true. MaxWallClockTimeExpiry - the Job reached its maxWallClockTime constraint. TerminateJobSchedule - the Job ran as part of a schedule, and the schedule terminated. AllTasksComplete - the Job's onAllTasksComplete attribute is set to terminatejob, and all Tasks in the Job are complete. TaskFailed - the Job's onTaskFailure attribute is set to performExitOptionsJobAction, and a Task in the Job failed with an exit condition that specified a jobAction of terminatejob. Any other string is a user-defined reason specified in a call to the 'Terminate a Job' operation.
+        public string TerminationReason { get; set; }
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobManagerTask.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobManagerTask.Serialization.cs
new file mode 100644
index 0000000000000..de8469b51cc05
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobManagerTask.Serialization.cs
@@ -0,0 +1,390 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.ClientModel.Primitives;
+using System.Collections.Generic;
+using System.Text.Json;
+using Azure.Core;
+
+namespace Azure.Compute.Batch
+{
+    public partial class BatchJobManagerTask : IUtf8JsonSerializable, IJsonModel<BatchJobManagerTask>
+    {
+        void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel<BatchJobManagerTask>)this).Write(writer, ModelSerializationExtensions.WireOptions);
+
+        void IJsonModel<BatchJobManagerTask>.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ? ((IPersistableModel<BatchJobManagerTask>)this).GetFormatFromOptions(options) : options.Format;
+            if (format != "J")
+            {
+                throw new FormatException($"The model {nameof(BatchJobManagerTask)} does not support writing '{format}' format.");
+            }
+
+            writer.WriteStartObject();
+            writer.WritePropertyName("id"u8);
+            writer.WriteStringValue(Id);
+            if (Optional.IsDefined(DisplayName))
+            {
+                writer.WritePropertyName("displayName"u8);
+                writer.WriteStringValue(DisplayName);
+            }
+            writer.WritePropertyName("commandLine"u8);
+            writer.WriteStringValue(CommandLine);
+            if (Optional.IsDefined(ContainerSettings))
+            {
+                writer.WritePropertyName("containerSettings"u8);
+                writer.WriteObjectValue(ContainerSettings, options);
+            }
+            if (Optional.IsCollectionDefined(ResourceFiles))
+            {
+                writer.WritePropertyName("resourceFiles"u8);
+                writer.WriteStartArray();
+                foreach (var item in ResourceFiles)
+                {
+                    writer.WriteObjectValue(item, options);
+                }
+                writer.WriteEndArray();
+            }
+            if (Optional.IsCollectionDefined(OutputFiles))
+            {
+                writer.WritePropertyName("outputFiles"u8);
+                writer.WriteStartArray();
+                foreach (var item in OutputFiles)
+                {
+                    writer.WriteObjectValue(item, options);
+                }
+                writer.WriteEndArray();
+            }
+            if (Optional.IsCollectionDefined(EnvironmentSettings))
+            {
+                writer.WritePropertyName("environmentSettings"u8);
+                writer.WriteStartArray();
+                foreach (var item in EnvironmentSettings)
+                {
+                    writer.WriteObjectValue(item, options);
+                }
+                writer.WriteEndArray();
+            }
+            if (Optional.IsDefined(Constraints))
+            {
+                writer.WritePropertyName("constraints"u8);
+                writer.WriteObjectValue(Constraints, options);
+            }
+            if (Optional.IsDefined(RequiredSlots))
+            {
+                writer.WritePropertyName("requiredSlots"u8);
+                writer.WriteNumberValue(RequiredSlots.Value);
+            }
+            if (Optional.IsDefined(KillJobOnCompletion))
+            {
+                writer.WritePropertyName("killJobOnCompletion"u8);
+                writer.WriteBooleanValue(KillJobOnCompletion.Value);
+            }
+            if (Optional.IsDefined(UserIdentity))
+            {
+                writer.WritePropertyName("userIdentity"u8);
+                writer.WriteObjectValue(UserIdentity, options);
+            }
+            if (Optional.IsDefined(RunExclusive))
+            {
+                writer.WritePropertyName("runExclusive"u8);
+                writer.WriteBooleanValue(RunExclusive.Value);
+            }
+            if (Optional.IsCollectionDefined(ApplicationPackageReferences))
+            {
+                writer.WritePropertyName("applicationPackageReferences"u8);
+                writer.WriteStartArray();
+                foreach (var item in ApplicationPackageReferences)
+                {
+                    writer.WriteObjectValue(item, options);
+                }
+                writer.WriteEndArray();
+            }
+            if (Optional.IsDefined(AuthenticationTokenSettings))
+            {
+                writer.WritePropertyName("authenticationTokenSettings"u8);
+                writer.WriteObjectValue(AuthenticationTokenSettings, options);
+            }
+            if (Optional.IsDefined(AllowLowPriorityNode))
+            {
+                writer.WritePropertyName("allowLowPriorityNode"u8);
+                writer.WriteBooleanValue(AllowLowPriorityNode.Value);
+            }
+            if (options.Format != "W" && _serializedAdditionalRawData != null)
+            {
+                foreach (var item in _serializedAdditionalRawData)
+                {
+                    writer.WritePropertyName(item.Key);
+#if NET6_0_OR_GREATER
+                    writer.WriteRawValue(item.Value);
+#else
+                    using (JsonDocument document = JsonDocument.Parse(item.Value))
+                    {
+                        JsonSerializer.Serialize(writer, document.RootElement);
+                    }
+#endif
+                }
+            }
+            writer.WriteEndObject();
+        }
+
+        BatchJobManagerTask IJsonModel<BatchJobManagerTask>.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ? ((IPersistableModel<BatchJobManagerTask>)this).GetFormatFromOptions(options) : options.Format;
+            if (format != "J")
+            {
+                throw new FormatException($"The model {nameof(BatchJobManagerTask)} does not support reading '{format}' format.");
+            }
+
+            using JsonDocument document = JsonDocument.ParseValue(ref reader);
+            return DeserializeBatchJobManagerTask(document.RootElement, options);
+        }
+
+        internal static BatchJobManagerTask DeserializeBatchJobManagerTask(JsonElement element, ModelReaderWriterOptions options = null)
+        {
+            options ??= ModelSerializationExtensions.WireOptions;
+
+            if (element.ValueKind == JsonValueKind.Null)
+            {
+                return null;
+            }
+            string id = default;
+            string displayName = default;
+            string commandLine = default;
+            BatchTaskContainerSettings containerSettings = default;
+            IList<ResourceFile> resourceFiles = default;
+            IList<OutputFile> outputFiles = default;
+            IList<EnvironmentSetting> environmentSettings = default;
+            BatchTaskConstraints constraints = default;
+            int? requiredSlots = default;
+            bool? killJobOnCompletion = default;
+            UserIdentity userIdentity = default;
+            bool? runExclusive = default;
+            IList<BatchApplicationPackageReference> applicationPackageReferences = default;
+            AuthenticationTokenSettings authenticationTokenSettings = default;
+            bool? allowLowPriorityNode = default;
+            IDictionary<string, BinaryData> serializedAdditionalRawData = default;
+            Dictionary<string, BinaryData> rawDataDictionary = new Dictionary<string, BinaryData>();
+            foreach (var property in element.EnumerateObject())
+            {
+                if (property.NameEquals("id"u8))
+                {
+                    id = property.Value.GetString();
+                    continue;
+                }
+                if (property.NameEquals("displayName"u8))
+                {
+                    displayName = property.Value.GetString();
+                    continue;
+                }
+                if (property.NameEquals("commandLine"u8))
+                {
+                    commandLine = property.Value.GetString();
+                    continue;
+                }
+                if (property.NameEquals("containerSettings"u8))
+                {
+                    if (property.Value.ValueKind == JsonValueKind.Null)
+                    {
+                        continue;
+                    }
+                    containerSettings = BatchTaskContainerSettings.DeserializeBatchTaskContainerSettings(property.Value, options);
+                    continue;
+                }
+                if (property.NameEquals("resourceFiles"u8))
+                {
+                    if (property.Value.ValueKind == JsonValueKind.Null)
+                    {
+                        continue;
+                    }
+                    List<ResourceFile> array = new List<ResourceFile>();
+                    foreach (var item in property.Value.EnumerateArray())
+                    {
+                        array.Add(ResourceFile.DeserializeResourceFile(item, options));
+                    }
+                    resourceFiles = array;
+                    continue;
+                }
+                if (property.NameEquals("outputFiles"u8))
+                {
+                    if (property.Value.ValueKind == JsonValueKind.Null)
+                    {
+                        continue;
+                    }
+                    List<OutputFile> array = new List<OutputFile>();
+                    foreach (var item in property.Value.EnumerateArray())
+                    {
+                        array.Add(OutputFile.DeserializeOutputFile(item, options));
+                    }
+                    outputFiles = array;
+                    continue;
+                }
+                if (property.NameEquals("environmentSettings"u8))
+                {
+                    if (property.Value.ValueKind == JsonValueKind.Null)
+                    {
+                        continue;
+                    }
+                    List<EnvironmentSetting> array = new List<EnvironmentSetting>();
+                    foreach (var item in property.Value.EnumerateArray())
+                    {
+                        array.Add(EnvironmentSetting.DeserializeEnvironmentSetting(item, options));
+                    }
+                    environmentSettings = array;
+                    continue;
+                }
+                if (property.NameEquals("constraints"u8))
+                {
+                    if (property.Value.ValueKind == JsonValueKind.Null)
+                    {
+                        continue;
+                    }
+                    constraints = BatchTaskConstraints.DeserializeBatchTaskConstraints(property.Value, options);
+                    continue;
+                }
+                if (property.NameEquals("requiredSlots"u8))
+                {
+                    if (property.Value.ValueKind == JsonValueKind.Null)
+                    {
+                        continue;
+                    }
+                    requiredSlots = property.Value.GetInt32();
+                    continue;
+                }
+                if (property.NameEquals("killJobOnCompletion"u8))
+                {
+                    if (property.Value.ValueKind == JsonValueKind.Null)
+                    {
+                        continue;
+                    }
+                    killJobOnCompletion = property.Value.GetBoolean();
+                    continue;
+                }
+                if (property.NameEquals("userIdentity"u8))
+                {
+                    if (property.Value.ValueKind == JsonValueKind.Null)
+                    {
+                        continue;
+                    }
+                    userIdentity = UserIdentity.DeserializeUserIdentity(property.Value, options);
+                    continue;
+                }
+                if (property.NameEquals("runExclusive"u8))
+                {
+                    if (property.Value.ValueKind == JsonValueKind.Null)
+                    {
+                        continue;
+                    }
+                    runExclusive = property.Value.GetBoolean();
+                    continue;
+                }
+                if (property.NameEquals("applicationPackageReferences"u8))
+                {
+                    if (property.Value.ValueKind == JsonValueKind.Null)
+                    {
+                        continue;
+                    }
+                    List<BatchApplicationPackageReference> array = new List<BatchApplicationPackageReference>();
+                    foreach (var item in property.Value.EnumerateArray())
+                    {
+                        array.Add(BatchApplicationPackageReference.DeserializeBatchApplicationPackageReference(item, options));
+                    }
+                    applicationPackageReferences = array;
+                    continue;
+                }
+                if (property.NameEquals("authenticationTokenSettings"u8))
+                {
+                    if (property.Value.ValueKind == JsonValueKind.Null)
+                    {
+                        continue;
+                    }
+                    authenticationTokenSettings = AuthenticationTokenSettings.DeserializeAuthenticationTokenSettings(property.Value, options);
+                    continue;
+                }
+                if (property.NameEquals("allowLowPriorityNode"u8))
+                {
+                    if (property.Value.ValueKind == JsonValueKind.Null)
+                    {
+                        continue;
+                    }
+                    allowLowPriorityNode = property.Value.GetBoolean();
+                    continue;
+                }
+                if (options.Format != "W")
+                {
+                    rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText()));
+                }
+            }
+            serializedAdditionalRawData = rawDataDictionary;
+            return new BatchJobManagerTask(
+                id,
+                displayName,
+                commandLine,
+                containerSettings,
+                resourceFiles ?? new ChangeTrackingList<ResourceFile>(),
+                outputFiles ?? new ChangeTrackingList<OutputFile>(),
+                environmentSettings ?? new ChangeTrackingList<EnvironmentSetting>(),
+                constraints,
+                requiredSlots,
+                killJobOnCompletion,
+                userIdentity,
+                runExclusive,
+                applicationPackageReferences ?? new ChangeTrackingList<BatchApplicationPackageReference>(),
+                authenticationTokenSettings,
+                allowLowPriorityNode,
+                serializedAdditionalRawData);
+        }
+
+        BinaryData IPersistableModel<BatchJobManagerTask>.Write(ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ? ((IPersistableModel<BatchJobManagerTask>)this).GetFormatFromOptions(options) : options.Format;
+
+            switch (format)
+            {
+                case "J":
+                    return ModelReaderWriter.Write(this, options);
+                default:
+                    throw new FormatException($"The model {nameof(BatchJobManagerTask)} does not support writing '{options.Format}' format.");
+            }
+        }
+
+        BatchJobManagerTask IPersistableModel<BatchJobManagerTask>.Create(BinaryData data, ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ? ((IPersistableModel<BatchJobManagerTask>)this).GetFormatFromOptions(options) : options.Format;
+
+            switch (format)
+            {
+                case "J":
+                    {
+                        using JsonDocument document = JsonDocument.Parse(data);
+                        return DeserializeBatchJobManagerTask(document.RootElement, options);
+                    }
+                default:
+                    throw new FormatException($"The model {nameof(BatchJobManagerTask)} does not support reading '{options.Format}' format.");
+            }
+        }
+
+        string IPersistableModel<BatchJobManagerTask>.GetFormatFromOptions(ModelReaderWriterOptions options) => "J";
+
+        /// Deserializes the model from a raw response.
+        /// The response to deserialize the model from.
+        internal static BatchJobManagerTask FromResponse(Response response)
+        {
+            using var document = JsonDocument.Parse(response.Content);
+            return DeserializeBatchJobManagerTask(document.RootElement);
+        }
+
+        /// Convert into a <see cref="RequestContent"/>.
+ internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobManagerTask.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobManagerTask.cs new file mode 100644 index 0000000000000..7f679bb2fb593 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobManagerTask.cs @@ -0,0 +1,180 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// + /// Specifies details of a Job Manager Task. + /// The Job Manager Task is automatically started when the Job is created. The + /// Batch service tries to schedule the Job Manager Task before any other Tasks in + /// the Job. When shrinking a Pool, the Batch service tries to preserve Nodes where + /// Job Manager Tasks are running for as long as possible (that is, Compute Nodes + /// running 'normal' Tasks are removed before Compute Nodes running Job Manager + /// Tasks). When a Job Manager Task fails and needs to be restarted, the system + /// tries to schedule it at the highest priority. If there are no idle Compute + /// Nodes available, the system may terminate one of the running Tasks in the Pool + /// and return it to the queue in order to make room for the Job Manager Task to + /// restart. Note that a Job Manager Task in one Job does not have priority over + /// Tasks in other Jobs. Across Jobs, only Job level priorities are observed. For + /// example, if a Job Manager in a priority 0 Job needs to be restarted, it will + /// not displace Tasks of a priority 1 Job. Batch will retry Tasks when a recovery + /// operation is triggered on a Node. Examples of recovery operations include (but + /// are not limited to) when an unhealthy Node is rebooted or a Compute Node + /// disappeared due to host failure. Retries due to recovery operations are + /// independent of and are not counted against the maxTaskRetryCount. Even if the + /// maxTaskRetryCount is 0, an internal retry due to a recovery operation may + /// occur. Because of this, all Tasks should be idempotent. This means Tasks need + /// to tolerate being interrupted and restarted without causing any corruption or + /// duplicate data. The best practice for long running Tasks is to use some form of + /// checkpointing. + /// + public partial class BatchJobManagerTask + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// A string that uniquely identifies the Job Manager Task within the Job. 
The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. + /// The command line of the Job Manager Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + /// or is null. + public BatchJobManagerTask(string id, string commandLine) + { + Argument.AssertNotNull(id, nameof(id)); + Argument.AssertNotNull(commandLine, nameof(commandLine)); + + Id = id; + CommandLine = commandLine; + ResourceFiles = new ChangeTrackingList(); + OutputFiles = new ChangeTrackingList(); + EnvironmentSettings = new ChangeTrackingList(); + ApplicationPackageReferences = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// A string that uniquely identifies the Job Manager Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. + /// The display name of the Job Manager Task. It need not be unique and can contain any Unicode characters up to a maximum length of 1024. + /// The command line of the Job Manager Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + /// The settings for the container under which the Job Manager Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. + /// A list of files that the Batch service will download to the Compute Node before running the command line. Files listed under this element are located in the Task's working directory. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. + /// A list of files that the Batch service will upload from the Compute Node after running the command line. 
For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed. + /// A list of environment variable settings for the Job Manager Task. + /// Constraints that apply to the Job Manager Task. + /// The number of scheduling slots that the Task requires to run. The default is 1. A Task can only be scheduled to run on a compute node if the node has enough free scheduling slots available. For multi-instance Tasks, this property is not supported and must not be specified. + /// Whether completion of the Job Manager Task signifies completion of the entire Job. If true, when the Job Manager Task completes, the Batch service marks the Job as complete. If any Tasks are still running at this time (other than Job Release), those Tasks are terminated. If false, the completion of the Job Manager Task does not affect the Job status. In this case, you should either use the onAllTasksComplete attribute to terminate the Job, or have a client or user terminate the Job explicitly. An example of this is if the Job Manager creates a set of Tasks but then takes no further role in their execution. The default value is true. If you are using the onAllTasksComplete and onTaskFailure attributes to control Job lifetime, and using the Job Manager Task only to create the Tasks for the Job (not to monitor progress), then it is important to set killJobOnCompletion to false. + /// The user identity under which the Job Manager Task runs. If omitted, the Task runs as a non-administrative user unique to the Task. + /// Whether the Job Manager Task requires exclusive use of the Compute Node where it runs. If true, no other Tasks will run on the same Node for as long as the Job Manager is running. If false, other Tasks can run simultaneously with the Job Manager on a Compute Node. The Job Manager Task counts normally against the Compute Node's concurrent Task limit, so this is only relevant if the Compute Node allows multiple concurrent Tasks. The default value is true. + /// + /// A list of Application Packages that the Batch service will deploy to the + /// Compute Node before running the command line.Application Packages are + /// downloaded and deployed to a shared directory, not the Task working + /// directory. Therefore, if a referenced Application Package is already + /// on the Compute Node, and is up to date, then it is not re-downloaded; + /// the existing copy on the Compute Node is used. If a referenced Application + /// Package cannot be installed, for example because the package has been deleted + /// or because download failed, the Task fails. + /// + /// The settings for an authentication token that the Task can use to perform Batch service operations. If this property is set, the Batch service provides the Task with an authentication token which can be used to authenticate Batch service operations without requiring an Account access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out using the token depend on the settings. For example, a Task can request Job permissions in order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the Job. + /// Whether the Job Manager Task may run on a Spot/Low-priority Compute Node. The default value is true. + /// Keeps track of any properties unknown to the library. 
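For orientation, a construction sketch using the public constructor documented above. The values are hypothetical; `id` and `commandLine` are the only required arguments (both are null-checked), and everything else is optional:

```csharp
using Azure.Compute.Batch;

// Illustrative values only. KillJobOnCompletion = false follows the guidance in
// the docs above: let onAllTasksComplete end the Job when the Job Manager Task
// only creates Tasks and does not monitor them.
var jobManager = new BatchJobManagerTask("jobmanager", "/bin/sh -c 'python3 driver.py'")
{
    DisplayName = "Driver task",
    KillJobOnCompletion = false,
    RunExclusive = false,
    RequiredSlots = 1,
    AllowLowPriorityNode = true
};
```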
+        internal BatchJobManagerTask(string id, string displayName, string commandLine, BatchTaskContainerSettings containerSettings, IList<ResourceFile> resourceFiles, IList<OutputFile> outputFiles, IList<EnvironmentSetting> environmentSettings, BatchTaskConstraints constraints, int? requiredSlots, bool? killJobOnCompletion, UserIdentity userIdentity, bool? runExclusive, IList<BatchApplicationPackageReference> applicationPackageReferences, AuthenticationTokenSettings authenticationTokenSettings, bool? allowLowPriorityNode, IDictionary<string, BinaryData> serializedAdditionalRawData)
+        {
+            Id = id;
+            DisplayName = displayName;
+            CommandLine = commandLine;
+            ContainerSettings = containerSettings;
+            ResourceFiles = resourceFiles;
+            OutputFiles = outputFiles;
+            EnvironmentSettings = environmentSettings;
+            Constraints = constraints;
+            RequiredSlots = requiredSlots;
+            KillJobOnCompletion = killJobOnCompletion;
+            UserIdentity = userIdentity;
+            RunExclusive = runExclusive;
+            ApplicationPackageReferences = applicationPackageReferences;
+            AuthenticationTokenSettings = authenticationTokenSettings;
+            AllowLowPriorityNode = allowLowPriorityNode;
+            _serializedAdditionalRawData = serializedAdditionalRawData;
+        }
+
+        /// Initializes a new instance of <see cref="BatchJobManagerTask"/> for deserialization.
+        internal BatchJobManagerTask()
+        {
+        }
+
+        /// A string that uniquely identifies the Job Manager Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters.
+        public string Id { get; set; }
+        /// The display name of the Job Manager Task. It need not be unique and can contain any Unicode characters up to a maximum length of 1024.
+        public string DisplayName { get; set; }
+        /// The command line of the Job Manager Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables).
+        public string CommandLine { get; set; }
+        /// The settings for the container under which the Job Manager Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files.
+        public BatchTaskContainerSettings ContainerSettings { get; set; }
+        /// A list of files that the Batch service will download to the Compute Node before running the command line. Files listed under this element are located in the Task's working directory. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers.
+        public IList<ResourceFile> ResourceFiles { get; }
+        /// A list of files that the Batch service will upload from the Compute Node after running the command line. For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed.
+        public IList<OutputFile> OutputFiles { get; }
+        /// A list of environment variable settings for the Job Manager Task.
+        public IList<EnvironmentSetting> EnvironmentSettings { get; }
+        /// Constraints that apply to the Job Manager Task.
+        public BatchTaskConstraints Constraints { get; set; }
+        /// The number of scheduling slots that the Task requires to run. The default is 1. A Task can only be scheduled to run on a compute node if the node has enough free scheduling slots available. For multi-instance Tasks, this property is not supported and must not be specified.
+        public int? RequiredSlots { get; set; }
+        /// Whether completion of the Job Manager Task signifies completion of the entire Job. If true, when the Job Manager Task completes, the Batch service marks the Job as complete. If any Tasks are still running at this time (other than Job Release), those Tasks are terminated. If false, the completion of the Job Manager Task does not affect the Job status. In this case, you should either use the onAllTasksComplete attribute to terminate the Job, or have a client or user terminate the Job explicitly. An example of this is if the Job Manager creates a set of Tasks but then takes no further role in their execution. The default value is true. If you are using the onAllTasksComplete and onTaskFailure attributes to control Job lifetime, and using the Job Manager Task only to create the Tasks for the Job (not to monitor progress), then it is important to set killJobOnCompletion to false.
+        public bool? KillJobOnCompletion { get; set; }
+        /// The user identity under which the Job Manager Task runs. If omitted, the Task runs as a non-administrative user unique to the Task.
+        public UserIdentity UserIdentity { get; set; }
+        /// Whether the Job Manager Task requires exclusive use of the Compute Node where it runs. If true, no other Tasks will run on the same Node for as long as the Job Manager is running. If false, other Tasks can run simultaneously with the Job Manager on a Compute Node. The Job Manager Task counts normally against the Compute Node's concurrent Task limit, so this is only relevant if the Compute Node allows multiple concurrent Tasks. The default value is true.
+        public bool? RunExclusive { get; set; }
+        ///
+        /// A list of Application Packages that the Batch service will deploy to the
+        /// Compute Node before running the command line. Application Packages are
+        /// downloaded and deployed to a shared directory, not the Task working
+        /// directory. Therefore, if a referenced Application Package is already
+        /// on the Compute Node, and is up to date, then it is not re-downloaded;
+        /// the existing copy on the Compute Node is used. If a referenced Application
+        /// Package cannot be installed, for example because the package has been deleted
+        /// or because download failed, the Task fails.
+        ///
+        public IList<BatchApplicationPackageReference> ApplicationPackageReferences { get; }
+        /// The settings for an authentication token that the Task can use to perform Batch service operations. If this property is set, the Batch service provides the Task with an authentication token which can be used to authenticate Batch service operations without requiring an Account access key.
The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out using the token depend on the settings. For example, a Task can request Job permissions in order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the Job. + public AuthenticationTokenSettings AuthenticationTokenSettings { get; set; } + /// Whether the Job Manager Task may run on a Spot/Low-priority Compute Node. The default value is true. + public bool? AllowLowPriorityNode { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobNetworkConfiguration.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobNetworkConfiguration.Serialization.cs new file mode 100644 index 0000000000000..9345163de4357 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobNetworkConfiguration.Serialization.cs @@ -0,0 +1,135 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchJobNetworkConfiguration : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobNetworkConfiguration)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("subnetId"u8); + writer.WriteStringValue(SubnetId); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchJobNetworkConfiguration IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobNetworkConfiguration)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchJobNetworkConfiguration(document.RootElement, options); + } + + internal static BatchJobNetworkConfiguration DeserializeBatchJobNetworkConfiguration(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string subnetId = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("subnetId"u8)) + { + subnetId = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchJobNetworkConfiguration(subnetId, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchJobNetworkConfiguration)} does not support writing '{options.Format}' format."); + } + } + + BatchJobNetworkConfiguration IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchJobNetworkConfiguration(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchJobNetworkConfiguration)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchJobNetworkConfiguration FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchJobNetworkConfiguration(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobNetworkConfiguration.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobNetworkConfiguration.cs new file mode 100644 index 0000000000000..d6697158bf5e3 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobNetworkConfiguration.cs @@ -0,0 +1,75 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// The network configuration for the Job. 
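Since `BatchJobNetworkConfiguration` exposes a single required `subnetId`, constructing it is a one-liner. A sketch with a placeholder ARM resource ID in the form the docs below describe:

```csharp
using Azure.Compute.Batch;

// Placeholder ARM resource ID; the constructor rejects null.
var jobNetwork = new BatchJobNetworkConfiguration(
    "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg" +
    "/providers/Microsoft.Network/virtualNetworks/vnet/subnets/default");
```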
+ public partial class BatchJobNetworkConfiguration + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The ARM resource identifier of the virtual network subnet which Compute Nodes running Tasks from the Job will join for the duration of the Task. This will only work with a VirtualMachineConfiguration Pool. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. For Pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + /// is null. + public BatchJobNetworkConfiguration(string subnetId) + { + Argument.AssertNotNull(subnetId, nameof(subnetId)); + + SubnetId = subnetId; + } + + /// Initializes a new instance of . + /// The ARM resource identifier of the virtual network subnet which Compute Nodes running Tasks from the Job will join for the duration of the Task. This will only work with a VirtualMachineConfiguration Pool. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). 
If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. For Pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + /// Keeps track of any properties unknown to the library. + internal BatchJobNetworkConfiguration(string subnetId, IDictionary serializedAdditionalRawData) + { + SubnetId = subnetId; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchJobNetworkConfiguration() + { + } + + /// The ARM resource identifier of the virtual network subnet which Compute Nodes running Tasks from the Job will join for the duration of the Task. This will only work with a VirtualMachineConfiguration Pool. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. For Pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + public string SubnetId { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationAndReleaseTaskStatus.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationAndReleaseTaskStatus.Serialization.cs new file mode 100644 index 0000000000000..ed82f3ea661e3 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationAndReleaseTaskStatus.Serialization.cs @@ -0,0 +1,196 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchJobPreparationAndReleaseTaskStatus : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobPreparationAndReleaseTaskStatus)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(PoolId)) + { + writer.WritePropertyName("poolId"u8); + writer.WriteStringValue(PoolId); + } + if (Optional.IsDefined(NodeId)) + { + writer.WritePropertyName("nodeId"u8); + writer.WriteStringValue(NodeId); + } + if (Optional.IsDefined(NodeUrl)) + { + writer.WritePropertyName("nodeUrl"u8); + writer.WriteStringValue(NodeUrl); + } + if (Optional.IsDefined(JobPreparationTaskExecutionInfo)) + { + writer.WritePropertyName("jobPreparationTaskExecutionInfo"u8); + writer.WriteObjectValue(JobPreparationTaskExecutionInfo, options); + } + if (Optional.IsDefined(JobReleaseTaskExecutionInfo)) + { + writer.WritePropertyName("jobReleaseTaskExecutionInfo"u8); + writer.WriteObjectValue(JobReleaseTaskExecutionInfo, options); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchJobPreparationAndReleaseTaskStatus IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobPreparationAndReleaseTaskStatus)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchJobPreparationAndReleaseTaskStatus(document.RootElement, options); + } + + internal static BatchJobPreparationAndReleaseTaskStatus DeserializeBatchJobPreparationAndReleaseTaskStatus(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string poolId = default; + string nodeId = default; + string nodeUrl = default; + BatchJobPreparationTaskExecutionInfo jobPreparationTaskExecutionInfo = default; + BatchJobReleaseTaskExecutionInfo jobReleaseTaskExecutionInfo = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("poolId"u8)) + { + poolId = property.Value.GetString(); + continue; + } + if (property.NameEquals("nodeId"u8)) + { + nodeId = property.Value.GetString(); + continue; + } + if (property.NameEquals("nodeUrl"u8)) + { + nodeUrl = property.Value.GetString(); + continue; + } + if (property.NameEquals("jobPreparationTaskExecutionInfo"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + jobPreparationTaskExecutionInfo = BatchJobPreparationTaskExecutionInfo.DeserializeBatchJobPreparationTaskExecutionInfo(property.Value, options); + continue; + } + if (property.NameEquals("jobReleaseTaskExecutionInfo"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + jobReleaseTaskExecutionInfo = BatchJobReleaseTaskExecutionInfo.DeserializeBatchJobReleaseTaskExecutionInfo(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchJobPreparationAndReleaseTaskStatus( + poolId, + nodeId, + nodeUrl, + jobPreparationTaskExecutionInfo, + jobReleaseTaskExecutionInfo, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchJobPreparationAndReleaseTaskStatus)} does not support writing '{options.Format}' format."); + } + } + + BatchJobPreparationAndReleaseTaskStatus IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchJobPreparationAndReleaseTaskStatus(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchJobPreparationAndReleaseTaskStatus)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. 
+ /// The response to deserialize the model from. + internal static BatchJobPreparationAndReleaseTaskStatus FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchJobPreparationAndReleaseTaskStatus(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationAndReleaseTaskStatus.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationAndReleaseTaskStatus.cs new file mode 100644 index 0000000000000..722bcb9a92d71 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationAndReleaseTaskStatus.cs @@ -0,0 +1,81 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// The status of the Job Preparation and Job Release Tasks on a Compute Node. + public partial class BatchJobPreparationAndReleaseTaskStatus + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + internal BatchJobPreparationAndReleaseTaskStatus() + { + } + + /// Initializes a new instance of . + /// The ID of the Pool containing the Compute Node to which this entry refers. + /// The ID of the Compute Node to which this entry refers. + /// The URL of the Compute Node to which this entry refers. + /// Information about the execution status of the Job Preparation Task on this Compute Node. + /// Information about the execution status of the Job Release Task on this Compute Node. This property is set only if the Job Release Task has run on the Compute Node. + /// Keeps track of any properties unknown to the library. + internal BatchJobPreparationAndReleaseTaskStatus(string poolId, string nodeId, string nodeUrl, BatchJobPreparationTaskExecutionInfo jobPreparationTaskExecutionInfo, BatchJobReleaseTaskExecutionInfo jobReleaseTaskExecutionInfo, IDictionary serializedAdditionalRawData) + { + PoolId = poolId; + NodeId = nodeId; + NodeUrl = nodeUrl; + JobPreparationTaskExecutionInfo = jobPreparationTaskExecutionInfo; + JobReleaseTaskExecutionInfo = jobReleaseTaskExecutionInfo; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The ID of the Pool containing the Compute Node to which this entry refers. + public string PoolId { get; } + /// The ID of the Compute Node to which this entry refers. + public string NodeId { get; } + /// The URL of the Compute Node to which this entry refers. 
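`BatchJobPreparationAndReleaseTaskStatus` is a read-only model (internal constructors, get-only properties), so instances normally come back from the service. A deserialization sketch with a hypothetical, service-shaped payload, assuming `ModelReaderWriter` with its default JSON options:

```csharp
using System;
using System.ClientModel.Primitives;
using Azure.Compute.Batch;

// Hypothetical service-shaped payload for illustration.
BinaryData json = BinaryData.FromString(
    "{\"poolId\":\"pool-1\",\"nodeId\":\"tvmps-123\"," +
    "\"nodeUrl\":\"https://account.region.batch.azure.com/pools/pool-1/nodes/tvmps-123\"}");

BatchJobPreparationAndReleaseTaskStatus status =
    ModelReaderWriter.Read<BatchJobPreparationAndReleaseTaskStatus>(json);
Console.WriteLine($"{status.PoolId}: {status.NodeId}");
```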
+ public string NodeUrl { get; } + /// Information about the execution status of the Job Preparation Task on this Compute Node. + public BatchJobPreparationTaskExecutionInfo JobPreparationTaskExecutionInfo { get; } + /// Information about the execution status of the Job Release Task on this Compute Node. This property is set only if the Job Release Task has run on the Compute Node. + public BatchJobReleaseTaskExecutionInfo JobReleaseTaskExecutionInfo { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationTask.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationTask.Serialization.cs new file mode 100644 index 0000000000000..86bd6b822b3b6 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationTask.Serialization.cs @@ -0,0 +1,281 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchJobPreparationTask : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobPreparationTask)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(Id)) + { + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + } + writer.WritePropertyName("commandLine"u8); + writer.WriteStringValue(CommandLine); + if (Optional.IsDefined(ContainerSettings)) + { + writer.WritePropertyName("containerSettings"u8); + writer.WriteObjectValue(ContainerSettings, options); + } + if (Optional.IsCollectionDefined(ResourceFiles)) + { + writer.WritePropertyName("resourceFiles"u8); + writer.WriteStartArray(); + foreach (var item in ResourceFiles) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (Optional.IsCollectionDefined(EnvironmentSettings)) + { + writer.WritePropertyName("environmentSettings"u8); + writer.WriteStartArray(); + foreach (var item in EnvironmentSettings) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (Optional.IsDefined(Constraints)) + { + writer.WritePropertyName("constraints"u8); + writer.WriteObjectValue(Constraints, options); + } + if (Optional.IsDefined(WaitForSuccess)) + { + writer.WritePropertyName("waitForSuccess"u8); + writer.WriteBooleanValue(WaitForSuccess.Value); + } + if (Optional.IsDefined(UserIdentity)) + { + writer.WritePropertyName("userIdentity"u8); + writer.WriteObjectValue(UserIdentity, options); + } + if (Optional.IsDefined(RerunOnNodeRebootAfterSuccess)) + { + writer.WritePropertyName("rerunOnNodeRebootAfterSuccess"u8); + writer.WriteBooleanValue(RerunOnNodeRebootAfterSuccess.Value); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + 
JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchJobPreparationTask IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobPreparationTask)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchJobPreparationTask(document.RootElement, options); + } + + internal static BatchJobPreparationTask DeserializeBatchJobPreparationTask(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + string commandLine = default; + BatchTaskContainerSettings containerSettings = default; + IList resourceFiles = default; + IList environmentSettings = default; + BatchTaskConstraints constraints = default; + bool? waitForSuccess = default; + UserIdentity userIdentity = default; + bool? rerunOnNodeRebootAfterSuccess = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("commandLine"u8)) + { + commandLine = property.Value.GetString(); + continue; + } + if (property.NameEquals("containerSettings"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + containerSettings = BatchTaskContainerSettings.DeserializeBatchTaskContainerSettings(property.Value, options); + continue; + } + if (property.NameEquals("resourceFiles"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(ResourceFile.DeserializeResourceFile(item, options)); + } + resourceFiles = array; + continue; + } + if (property.NameEquals("environmentSettings"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(EnvironmentSetting.DeserializeEnvironmentSetting(item, options)); + } + environmentSettings = array; + continue; + } + if (property.NameEquals("constraints"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + constraints = BatchTaskConstraints.DeserializeBatchTaskConstraints(property.Value, options); + continue; + } + if (property.NameEquals("waitForSuccess"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + waitForSuccess = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("userIdentity"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + userIdentity = UserIdentity.DeserializeUserIdentity(property.Value, options); + continue; + } + if (property.NameEquals("rerunOnNodeRebootAfterSuccess"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + rerunOnNodeRebootAfterSuccess = property.Value.GetBoolean(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + 
serializedAdditionalRawData = rawDataDictionary; + return new BatchJobPreparationTask( + id, + commandLine, + containerSettings, + resourceFiles ?? new ChangeTrackingList(), + environmentSettings ?? new ChangeTrackingList(), + constraints, + waitForSuccess, + userIdentity, + rerunOnNodeRebootAfterSuccess, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchJobPreparationTask)} does not support writing '{options.Format}' format."); + } + } + + BatchJobPreparationTask IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchJobPreparationTask(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchJobPreparationTask)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchJobPreparationTask FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchJobPreparationTask(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationTask.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationTask.cs new file mode 100644 index 0000000000000..ce02002b17a04 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationTask.cs @@ -0,0 +1,136 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// + /// A Job Preparation Task to run before any Tasks of the Job on any given Compute Node. + /// You can use Job Preparation to prepare a Node to run Tasks for the Job. + /// Activities commonly performed in Job Preparation include: Downloading common + /// resource files used by all the Tasks in the Job. The Job Preparation Task can + /// download these common resource files to the shared location on the Node. + /// (AZ_BATCH_NODE_ROOT_DIR\shared), or starting a local service on the Node so + /// that all Tasks of that Job can communicate with it. If the Job Preparation Task + /// fails (that is, exhausts its retry count before exiting with exit code 0), + /// Batch will not run Tasks of this Job on the Node. The Compute Node remains + /// ineligible to run Tasks of this Job until it is reimaged. The Compute Node + /// remains active and can be used for other Jobs. The Job Preparation Task can run + /// multiple times on the same Node. Therefore, you should write the Job + /// Preparation Task to handle re-execution. 
If the Node is rebooted, the Job + /// Preparation Task is run again on the Compute Node before scheduling any other + /// Task of the Job, if rerunOnNodeRebootAfterSuccess is true or if the Job + /// Preparation Task did not previously complete. If the Node is reimaged, the Job + /// Preparation Task is run again before scheduling any Task of the Job. Batch will + /// retry Tasks when a recovery operation is triggered on a Node. Examples of + /// recovery operations include (but are not limited to) when an unhealthy Node is + /// rebooted or a Compute Node disappeared due to host failure. Retries due to + /// recovery operations are independent of and are not counted against the + /// maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal retry due to + /// a recovery operation may occur. Because of this, all Tasks should be + /// idempotent. This means Tasks need to tolerate being interrupted and restarted + /// without causing any corruption or duplicate data. The best practice for long + /// running Tasks is to use some form of checkpointing. + /// + public partial class BatchJobPreparationTask + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The command line of the Job Preparation Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + /// is null. + public BatchJobPreparationTask(string commandLine) + { + Argument.AssertNotNull(commandLine, nameof(commandLine)); + + CommandLine = commandLine; + ResourceFiles = new ChangeTrackingList(); + EnvironmentSettings = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// A string that uniquely identifies the Job Preparation Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. If you do not specify this property, the Batch service assigns a default value of 'jobpreparation'. No other Task in the Job can have the same ID as the Job Preparation Task. If you try to submit a Task with the same id, the Batch service rejects the request with error code TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, the HTTP status code is 409 (Conflict). + /// The command line of the Job Preparation Task. 
The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + /// The settings for the container under which the Job Preparation Task runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. + /// A list of files that the Batch service will download to the Compute Node before running the command line. Files listed under this element are located in the Task's working directory. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. + /// A list of environment variable settings for the Job Preparation Task. + /// Constraints that apply to the Job Preparation Task. + /// Whether the Batch service should wait for the Job Preparation Task to complete successfully before scheduling any other Tasks of the Job on the Compute Node. A Job Preparation Task has completed successfully if it exits with exit code 0. If true and the Job Preparation Task fails on a Node, the Batch service retries the Job Preparation Task up to its maximum retry count (as specified in the constraints element). If the Task has still not completed successfully after all retries, then the Batch service will not schedule Tasks of the Job to the Node. The Node remains active and eligible to run Tasks of other Jobs. If false, the Batch service will not wait for the Job Preparation Task to complete. In this case, other Tasks of the Job can start executing on the Compute Node while the Job Preparation Task is still running; and even if the Job Preparation Task fails, new Tasks will continue to be scheduled on the Compute Node. The default value is true. + /// The user identity under which the Job Preparation Task runs. If omitted, the Task runs as a non-administrative user unique to the Task on Windows Compute Nodes, or a non-administrative user unique to the Pool on Linux Compute Nodes. + /// Whether the Batch service should rerun the Job Preparation Task after a Compute Node reboots. The Job Preparation Task is always rerun if a Compute Node is reimaged, or if the Job Preparation Task did not complete (e.g. because the reboot occurred while the Task was running). Therefore, you should always write a Job Preparation Task to be idempotent and to behave correctly if run multiple times. The default value is true. + /// Keeps track of any properties unknown to the library. 
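For orientation, here is a minimal usage sketch of the public constructor documented above; the command line is invented, and the surrounding pool/job wiring is omitted:

    using Azure.Compute.Batch;

    // Only the command line is required; the optional knobs below mirror the
    // service defaults called out in the property documentation.
    var prepTask = new BatchJobPreparationTask("/bin/sh -c 'stage-data.sh'")
    {
        Id = "jobpreparation",               // the service-assigned default when omitted
        WaitForSuccess = true,               // default: block other tasks until prep succeeds
        RerunOnNodeRebootAfterSuccess = true // default: rerun after a node reboot
    };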
+ internal BatchJobPreparationTask(string id, string commandLine, BatchTaskContainerSettings containerSettings, IList resourceFiles, IList environmentSettings, BatchTaskConstraints constraints, bool? waitForSuccess, UserIdentity userIdentity, bool? rerunOnNodeRebootAfterSuccess, IDictionary serializedAdditionalRawData) + { + Id = id; + CommandLine = commandLine; + ContainerSettings = containerSettings; + ResourceFiles = resourceFiles; + EnvironmentSettings = environmentSettings; + Constraints = constraints; + WaitForSuccess = waitForSuccess; + UserIdentity = userIdentity; + RerunOnNodeRebootAfterSuccess = rerunOnNodeRebootAfterSuccess; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchJobPreparationTask() + { + } + + /// A string that uniquely identifies the Job Preparation Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. If you do not specify this property, the Batch service assigns a default value of 'jobpreparation'. No other Task in the Job can have the same ID as the Job Preparation Task. If you try to submit a Task with the same id, the Batch service rejects the request with error code TaskIdSameAsJobPreparationTask; if you are calling the REST API directly, the HTTP status code is 409 (Conflict). + public string Id { get; set; } + /// The command line of the Job Preparation Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + public string CommandLine { get; set; } + /// The settings for the container under which the Job Preparation Task runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. + public BatchTaskContainerSettings ContainerSettings { get; set; } + /// A list of files that the Batch service will download to the Compute Node before running the command line. Files listed under this element are located in the Task's working directory. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. + public IList ResourceFiles { get; } + /// A list of environment variable settings for the Job Preparation Task. + public IList EnvironmentSettings { get; } + /// Constraints that apply to the Job Preparation Task. 
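Because the model implements IJsonModel and IPersistableModel (see the companion Serialization.cs above), it can be round-tripped through JSON with System.ClientModel's ModelReaderWriter and no service call. A small sketch:

    using System;
    using System.ClientModel.Primitives;
    using Azure.Compute.Batch;

    var task = new BatchJobPreparationTask("cmd /c echo prep");
    BinaryData json = ModelReaderWriter.Write(task);   // defaults to the "J" (JSON) format
    BatchJobPreparationTask copy = ModelReaderWriter.Read<BatchJobPreparationTask>(json);
    Console.WriteLine(json.ToString());                // {"commandLine":"cmd /c echo prep"}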
+ public BatchTaskConstraints Constraints { get; set; } + /// Whether the Batch service should wait for the Job Preparation Task to complete successfully before scheduling any other Tasks of the Job on the Compute Node. A Job Preparation Task has completed successfully if it exits with exit code 0. If true and the Job Preparation Task fails on a Node, the Batch service retries the Job Preparation Task up to its maximum retry count (as specified in the constraints element). If the Task has still not completed successfully after all retries, then the Batch service will not schedule Tasks of the Job to the Node. The Node remains active and eligible to run Tasks of other Jobs. If false, the Batch service will not wait for the Job Preparation Task to complete. In this case, other Tasks of the Job can start executing on the Compute Node while the Job Preparation Task is still running; and even if the Job Preparation Task fails, new Tasks will continue to be scheduled on the Compute Node. The default value is true. + public bool? WaitForSuccess { get; set; } + /// The user identity under which the Job Preparation Task runs. If omitted, the Task runs as a non-administrative user unique to the Task on Windows Compute Nodes, or a non-administrative user unique to the Pool on Linux Compute Nodes. + public UserIdentity UserIdentity { get; set; } + /// Whether the Batch service should rerun the Job Preparation Task after a Compute Node reboots. The Job Preparation Task is always rerun if a Compute Node is reimaged, or if the Job Preparation Task did not complete (e.g. because the reboot occurred while the Task was running). Therefore, you should always write a Job Preparation Task to be idempotent and to behave correctly if run multiple times. The default value is true. + public bool? RerunOnNodeRebootAfterSuccess { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationTaskExecutionInfo.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationTaskExecutionInfo.Serialization.cs new file mode 100644 index 0000000000000..96e55f511fcb8 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationTaskExecutionInfo.Serialization.cs @@ -0,0 +1,275 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchJobPreparationTaskExecutionInfo : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobPreparationTaskExecutionInfo)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("startTime"u8); + writer.WriteStringValue(StartTime, "O"); + if (Optional.IsDefined(EndTime)) + { + writer.WritePropertyName("endTime"u8); + writer.WriteStringValue(EndTime.Value, "O"); + } + writer.WritePropertyName("state"u8); + writer.WriteStringValue(State.ToString()); + if (Optional.IsDefined(TaskRootDirectory)) + { + writer.WritePropertyName("taskRootDirectory"u8); + writer.WriteStringValue(TaskRootDirectory); + } + if (Optional.IsDefined(TaskRootDirectoryUrl)) + { + writer.WritePropertyName("taskRootDirectoryUrl"u8); + writer.WriteStringValue(TaskRootDirectoryUrl); + } + if (Optional.IsDefined(ExitCode)) + { + writer.WritePropertyName("exitCode"u8); + writer.WriteNumberValue(ExitCode.Value); + } + if (Optional.IsDefined(ContainerInfo)) + { + writer.WritePropertyName("containerInfo"u8); + writer.WriteObjectValue(ContainerInfo, options); + } + if (Optional.IsDefined(FailureInfo)) + { + writer.WritePropertyName("failureInfo"u8); + writer.WriteObjectValue(FailureInfo, options); + } + writer.WritePropertyName("retryCount"u8); + writer.WriteNumberValue(RetryCount); + if (Optional.IsDefined(LastRetryTime)) + { + writer.WritePropertyName("lastRetryTime"u8); + writer.WriteStringValue(LastRetryTime.Value, "O"); + } + if (Optional.IsDefined(Result)) + { + writer.WritePropertyName("result"u8); + writer.WriteStringValue(Result.Value.ToString()); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchJobPreparationTaskExecutionInfo IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobPreparationTaskExecutionInfo)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchJobPreparationTaskExecutionInfo(document.RootElement, options); + } + + internal static BatchJobPreparationTaskExecutionInfo DeserializeBatchJobPreparationTaskExecutionInfo(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + DateTimeOffset startTime = default; + DateTimeOffset? endTime = default; + BatchJobPreparationTaskState state = default; + string taskRootDirectory = default; + string taskRootDirectoryUrl = default; + int? exitCode = default; + BatchTaskContainerExecutionInfo containerInfo = default; + BatchTaskFailureInfo failureInfo = default; + int retryCount = default; + DateTimeOffset? lastRetryTime = default; + BatchTaskExecutionResult? 
result = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("startTime"u8)) + { + startTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("endTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + endTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("state"u8)) + { + state = new BatchJobPreparationTaskState(property.Value.GetString()); + continue; + } + if (property.NameEquals("taskRootDirectory"u8)) + { + taskRootDirectory = property.Value.GetString(); + continue; + } + if (property.NameEquals("taskRootDirectoryUrl"u8)) + { + taskRootDirectoryUrl = property.Value.GetString(); + continue; + } + if (property.NameEquals("exitCode"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + exitCode = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("containerInfo"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + containerInfo = BatchTaskContainerExecutionInfo.DeserializeBatchTaskContainerExecutionInfo(property.Value, options); + continue; + } + if (property.NameEquals("failureInfo"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + failureInfo = BatchTaskFailureInfo.DeserializeBatchTaskFailureInfo(property.Value, options); + continue; + } + if (property.NameEquals("retryCount"u8)) + { + retryCount = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("lastRetryTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + lastRetryTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("result"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + result = new BatchTaskExecutionResult(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchJobPreparationTaskExecutionInfo( + startTime, + endTime, + state, + taskRootDirectory, + taskRootDirectoryUrl, + exitCode, + containerInfo, + failureInfo, + retryCount, + lastRetryTime, + result, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchJobPreparationTaskExecutionInfo)} does not support writing '{options.Format}' format."); + } + } + + BatchJobPreparationTaskExecutionInfo IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format;
+
+            switch (format)
+            {
+                case "J":
+                    {
+                        using JsonDocument document = JsonDocument.Parse(data);
+                        return DeserializeBatchJobPreparationTaskExecutionInfo(document.RootElement, options);
+                    }
+                default:
+                    throw new FormatException($"The model {nameof(BatchJobPreparationTaskExecutionInfo)} does not support reading '{options.Format}' format.");
+            }
+        }
+
+        string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J";
+
+        /// Deserializes the model from a raw response.
+        /// The response to deserialize the model from.
+        internal static BatchJobPreparationTaskExecutionInfo FromResponse(Response response)
+        {
+            using var document = JsonDocument.Parse(response.Content);
+            return DeserializeBatchJobPreparationTaskExecutionInfo(document.RootElement);
+        }
+
+        /// Convert into a .
+        internal virtual RequestContent ToRequestContent()
+        {
+            var content = new Utf8JsonRequestContent();
+            content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions);
+            return content;
+        }
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationTaskExecutionInfo.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationTaskExecutionInfo.cs
new file mode 100644
index 0000000000000..8d65cf18954eb
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationTaskExecutionInfo.cs
@@ -0,0 +1,119 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// 
+
+#nullable disable
+
+using System;
+using System.Collections.Generic;
+
+namespace Azure.Compute.Batch
+{
+    /// 
+    /// Contains information about the execution of a Job Preparation Task on a Compute
+    /// Node.
+    /// 
+    public partial class BatchJobPreparationTaskExecutionInfo
+    {
+        /// 
+        /// Keeps track of any properties unknown to the library.
+        /// 
+        /// To assign an object to the value of this property use .
+        /// 
+        /// 
+        /// To assign an already formatted json string to this property use .
+        /// 
+        /// 
+        /// Examples:
+        /// 
+        /// 
+        /// BinaryData.FromObjectAsJson("foo")
+        /// Creates a payload of "foo".
+        /// 
+        /// 
+        /// BinaryData.FromString("\"foo\"")
+        /// Creates a payload of "foo".
+        /// 
+        /// 
+        /// BinaryData.FromObjectAsJson(new { key = "value" })
+        /// Creates a payload of { "key": "value" }.
+        /// 
+        /// 
+        /// BinaryData.FromString("{\"key\": \"value\"}")
+        /// Creates a payload of { "key": "value" }.
+        /// 
+        /// 
+        /// 
+        /// 
+        private IDictionary _serializedAdditionalRawData;
+
+        /// Initializes a new instance of .
+        /// The time at which the Task started running. If the Task has been restarted or retried, this is the most recent time at which the Task started running.
+        /// The current state of the Job Preparation Task on the Compute Node.
+        /// The number of times the Task has been retried by the Batch service. Task application failures (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file upload errors are not retried. The Batch service will retry the Task up to the limit specified by the constraints.
+        internal BatchJobPreparationTaskExecutionInfo(DateTimeOffset startTime, BatchJobPreparationTaskState state, int retryCount)
+        {
+            StartTime = startTime;
+            State = state;
+            RetryCount = retryCount;
+        }
+
+        /// Initializes a new instance of .
+        /// The time at which the Task started running. If the Task has been restarted or retried, this is the most recent time at which the Task started running.
+        /// The time at which the Job Preparation Task completed. This property is set only if the Task is in the Completed state.
+        /// The current state of the Job Preparation Task on the Compute Node.
+        /// The root directory of the Job Preparation Task on the Compute Node. You can use this path to retrieve files created by the Task, such as log files.
+        /// The URL to the root directory of the Job Preparation Task on the Compute Node.
+        /// The exit code of the program specified on the Task command line. This parameter is returned only if the Task is in the completed state. The exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. Note that the exit code may also be generated by the Compute Node operating system, such as when a process is forcibly terminated.
+        /// Information about the container under which the Task is executing. This property is set only if the Task runs in a container context.
+        /// Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure.
+        /// The number of times the Task has been retried by the Batch service. Task application failures (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file upload errors are not retried. The Batch service will retry the Task up to the limit specified by the constraints.
+        /// The most recent time at which a retry of the Job Preparation Task started running. This property is set only if the Task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the Task has been restarted for reasons other than retry; for example, if the Compute Node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not.
+        /// The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property.
+        /// Keeps track of any properties unknown to the library.
+        internal BatchJobPreparationTaskExecutionInfo(DateTimeOffset startTime, DateTimeOffset? endTime, BatchJobPreparationTaskState state, string taskRootDirectory, string taskRootDirectoryUrl, int? exitCode, BatchTaskContainerExecutionInfo containerInfo, BatchTaskFailureInfo failureInfo, int retryCount, DateTimeOffset? lastRetryTime, BatchTaskExecutionResult? result, IDictionary serializedAdditionalRawData)
+        {
+            StartTime = startTime;
+            EndTime = endTime;
+            State = state;
+            TaskRootDirectory = taskRootDirectory;
+            TaskRootDirectoryUrl = taskRootDirectoryUrl;
+            ExitCode = exitCode;
+            ContainerInfo = containerInfo;
+            FailureInfo = failureInfo;
+            RetryCount = retryCount;
+            LastRetryTime = lastRetryTime;
+            Result = result;
+            _serializedAdditionalRawData = serializedAdditionalRawData;
+        }
+
+        /// Initializes a new instance of for deserialization.
+        internal BatchJobPreparationTaskExecutionInfo()
+        {
+        }
+
+        /// The time at which the Task started running. If the Task has been restarted or retried, this is the most recent time at which the Task started running.
+        public DateTimeOffset StartTime { get; }
+        /// The time at which the Job Preparation Task completed. This property is set only if the Task is in the Completed state.
+        public DateTimeOffset? EndTime { get; }
+        /// The current state of the Job Preparation Task on the Compute Node.
+        public BatchJobPreparationTaskState State { get; }
+        /// The root directory of the Job Preparation Task on the Compute Node. You can use this path to retrieve files created by the Task, such as log files.
+        public string TaskRootDirectory { get; }
+        /// The URL to the root directory of the Job Preparation Task on the Compute Node.
+        public string TaskRootDirectoryUrl { get; }
+        /// The exit code of the program specified on the Task command line. This parameter is returned only if the Task is in the completed state. The exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. Note that the exit code may also be generated by the Compute Node operating system, such as when a process is forcibly terminated.
+        public int? ExitCode { get; }
+        /// Information about the container under which the Task is executing. This property is set only if the Task runs in a container context.
+        public BatchTaskContainerExecutionInfo ContainerInfo { get; }
+        /// Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure.
+        public BatchTaskFailureInfo FailureInfo { get; }
+        /// The number of times the Task has been retried by the Batch service. Task application failures (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file upload errors are not retried. The Batch service will retry the Task up to the limit specified by the constraints.
+        public int RetryCount { get; }
+        /// The most recent time at which a retry of the Job Preparation Task started running. This property is set only if the Task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the Task has been restarted for reasons other than retry; for example, if the Compute Node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not.
+        public DateTimeOffset? LastRetryTime { get; }
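All of the properties on this model are get-only, so application code normally receives it from the client rather than building it; for illustration, a sketch that rehydrates one from a fabricated wire payload (the JSON values are invented):

    using System;
    using System.ClientModel.Primitives;
    using Azure.Compute.Batch;

    BinaryData wire = BinaryData.FromString(
        "{\"startTime\":\"2024-06-01T10:00:00Z\",\"state\":\"completed\",\"retryCount\":1,\"exitCode\":0}");
    var info = ModelReaderWriter.Read<BatchJobPreparationTaskExecutionInfo>(wire);

    // BatchJobPreparationTaskState (declared in the next file) is an extensible
    // enum: known values compare case-insensitively, and unknown service values
    // still round-trip as strings instead of throwing.
    if (info.State == BatchJobPreparationTaskState.Completed && info.ExitCode == 0)
    {
        Console.WriteLine($"Prep task succeeded after {info.RetryCount} retry(ies).");
    }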
+        /// The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property.
+        public BatchTaskExecutionResult? Result { get; }
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationTaskState.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationTaskState.cs
new file mode 100644
index 0000000000000..da3aff29ad1d8
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationTaskState.cs
@@ -0,0 +1,51 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// 
+
+#nullable disable
+
+using System;
+using System.ComponentModel;
+
+namespace Azure.Compute.Batch
+{
+    /// BatchJobPreparationTaskState enums.
+    public readonly partial struct BatchJobPreparationTaskState : IEquatable
+    {
+        private readonly string _value;
+
+        /// Initializes a new instance of .
+        /// is null.
+        public BatchJobPreparationTaskState(string value)
+        {
+            _value = value ?? throw new ArgumentNullException(nameof(value));
+        }
+
+        private const string RunningValue = "running";
+        private const string CompletedValue = "completed";
+
+        /// The Task is currently running (including retrying).
+        public static BatchJobPreparationTaskState Running { get; } = new BatchJobPreparationTaskState(RunningValue);
+        /// The Task has exited with exit code 0, or the Task has exhausted its retry limit, or the Batch service was unable to start the Task due to Task preparation errors (such as resource file download failures).
+        public static BatchJobPreparationTaskState Completed { get; } = new BatchJobPreparationTaskState(CompletedValue);
+        /// Determines if two values are the same.
+        public static bool operator ==(BatchJobPreparationTaskState left, BatchJobPreparationTaskState right) => left.Equals(right);
+        /// Determines if two values are not the same.
+        public static bool operator !=(BatchJobPreparationTaskState left, BatchJobPreparationTaskState right) => !left.Equals(right);
+        /// Converts a string to a .
+        public static implicit operator BatchJobPreparationTaskState(string value) => new BatchJobPreparationTaskState(value);
+
+        /// 
+        [EditorBrowsable(EditorBrowsableState.Never)]
+        public override bool Equals(object obj) => obj is BatchJobPreparationTaskState other && Equals(other);
+        /// 
+        public bool Equals(BatchJobPreparationTaskState other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase);
+
+        /// 
+        [EditorBrowsable(EditorBrowsableState.Never)]
+        public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0;
+        /// 
+        public override string ToString() => _value;
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobReleaseTask.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobReleaseTask.Serialization.cs
new file mode 100644
index 0000000000000..22d13533c3926
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobReleaseTask.Serialization.cs
@@ -0,0 +1,265 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// 
+
+#nullable disable
+
+using System;
+using System.ClientModel.Primitives;
+using System.Collections.Generic;
+using System.Text.Json;
+using Azure.Core;
+
+namespace Azure.Compute.Batch
+{
+    public partial class BatchJobReleaseTask : IUtf8JsonSerializable, IJsonModel
+    {
+        void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions);
+
+        void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ?
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobReleaseTask)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(Id)) + { + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + } + writer.WritePropertyName("commandLine"u8); + writer.WriteStringValue(CommandLine); + if (Optional.IsDefined(ContainerSettings)) + { + writer.WritePropertyName("containerSettings"u8); + writer.WriteObjectValue(ContainerSettings, options); + } + if (Optional.IsCollectionDefined(ResourceFiles)) + { + writer.WritePropertyName("resourceFiles"u8); + writer.WriteStartArray(); + foreach (var item in ResourceFiles) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (Optional.IsCollectionDefined(EnvironmentSettings)) + { + writer.WritePropertyName("environmentSettings"u8); + writer.WriteStartArray(); + foreach (var item in EnvironmentSettings) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (Optional.IsDefined(MaxWallClockTime)) + { + writer.WritePropertyName("maxWallClockTime"u8); + writer.WriteStringValue(MaxWallClockTime.Value, "P"); + } + if (Optional.IsDefined(RetentionTime)) + { + writer.WritePropertyName("retentionTime"u8); + writer.WriteStringValue(RetentionTime.Value, "P"); + } + if (Optional.IsDefined(UserIdentity)) + { + writer.WritePropertyName("userIdentity"u8); + writer.WriteObjectValue(UserIdentity, options); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchJobReleaseTask IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobReleaseTask)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchJobReleaseTask(document.RootElement, options); + } + + internal static BatchJobReleaseTask DeserializeBatchJobReleaseTask(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + string commandLine = default; + BatchTaskContainerSettings containerSettings = default; + IList resourceFiles = default; + IList environmentSettings = default; + TimeSpan? maxWallClockTime = default; + TimeSpan? 
retentionTime = default; + UserIdentity userIdentity = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("commandLine"u8)) + { + commandLine = property.Value.GetString(); + continue; + } + if (property.NameEquals("containerSettings"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + containerSettings = BatchTaskContainerSettings.DeserializeBatchTaskContainerSettings(property.Value, options); + continue; + } + if (property.NameEquals("resourceFiles"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(ResourceFile.DeserializeResourceFile(item, options)); + } + resourceFiles = array; + continue; + } + if (property.NameEquals("environmentSettings"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(EnvironmentSetting.DeserializeEnvironmentSetting(item, options)); + } + environmentSettings = array; + continue; + } + if (property.NameEquals("maxWallClockTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + maxWallClockTime = property.Value.GetTimeSpan("P"); + continue; + } + if (property.NameEquals("retentionTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + retentionTime = property.Value.GetTimeSpan("P"); + continue; + } + if (property.NameEquals("userIdentity"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + userIdentity = UserIdentity.DeserializeUserIdentity(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchJobReleaseTask( + id, + commandLine, + containerSettings, + resourceFiles ?? new ChangeTrackingList(), + environmentSettings ?? new ChangeTrackingList(), + maxWallClockTime, + retentionTime, + userIdentity, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchJobReleaseTask)} does not support writing '{options.Format}' format."); + } + } + + BatchJobReleaseTask IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchJobReleaseTask(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchJobReleaseTask)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. 
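One wire-format detail from the reader/writer above: the two TimeSpan properties travel as ISO 8601 durations (the "P" format). A sketch, with an invented command line; the 15-minute maximum and 7-day default are documented on the model below:

    using System;
    using System.ClientModel.Primitives;
    using Azure.Compute.Batch;

    var release = new BatchJobReleaseTask("/bin/sh -c 'cleanup.sh'")
    {
        MaxWallClockTime = TimeSpan.FromMinutes(15), // the documented service maximum
        RetentionTime = TimeSpan.FromDays(7)         // the documented service default
    };
    // Prints: {"commandLine":"/bin/sh -c 'cleanup.sh'","maxWallClockTime":"PT15M","retentionTime":"P7D"}
    Console.WriteLine(ModelReaderWriter.Write(release).ToString());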
+ internal static BatchJobReleaseTask FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchJobReleaseTask(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobReleaseTask.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobReleaseTask.cs new file mode 100644 index 0000000000000..acbf28a11f7d3 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobReleaseTask.cs @@ -0,0 +1,122 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// + /// A Job Release Task to run on Job completion on any Compute Node where the Job has run. + /// The Job Release Task runs when the Job ends, because of one of the following: + /// The user calls the Terminate Job API, or the Delete Job API while the Job is + /// still active, the Job's maximum wall clock time constraint is reached, and the + /// Job is still active, or the Job's Job Manager Task completed, and the Job is + /// configured to terminate when the Job Manager completes. The Job Release Task + /// runs on each Node where Tasks of the Job have run and the Job Preparation Task + /// ran and completed. If you reimage a Node after it has run the Job Preparation + /// Task, and the Job ends without any further Tasks of the Job running on that + /// Node (and hence the Job Preparation Task does not re-run), then the Job Release + /// Task does not run on that Compute Node. If a Node reboots while the Job Release + /// Task is still running, the Job Release Task runs again when the Compute Node + /// starts up. The Job is not marked as complete until all Job Release Tasks have + /// completed. The Job Release Task runs in the background. It does not occupy a + /// scheduling slot; that is, it does not count towards the taskSlotsPerNode limit + /// specified on the Pool. + /// + public partial class BatchJobReleaseTask + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The command line of the Job Release Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. 
If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + /// is null. + public BatchJobReleaseTask(string commandLine) + { + Argument.AssertNotNull(commandLine, nameof(commandLine)); + + CommandLine = commandLine; + ResourceFiles = new ChangeTrackingList(); + EnvironmentSettings = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// A string that uniquely identifies the Job Release Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. If you do not specify this property, the Batch service assigns a default value of 'jobrelease'. No other Task in the Job can have the same ID as the Job Release Task. If you try to submit a Task with the same id, the Batch service rejects the request with error code TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the HTTP status code is 409 (Conflict). + /// The command line of the Job Release Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + /// The settings for the container under which the Job Release Task runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. + /// A list of files that the Batch service will download to the Compute Node before running the command line. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. Files listed under this element are located in the Task's working directory. + /// A list of environment variable settings for the Job Release Task. + /// The maximum elapsed time that the Job Release Task may run on a given Compute Node, measured from the time the Task starts. If the Task does not complete within the time limit, the Batch service terminates it. The default value is 15 minutes. You may not specify a timeout longer than 15 minutes. If you do, the Batch service rejects it with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + /// The minimum time to retain the Task directory for the Job Release Task on the Compute Node. After this time, the Batch service may delete the Task directory and all its contents. The default is 7 days, i.e. 
the Task directory will be retained for 7 days unless the Compute Node is removed or the Job is deleted. + /// The user identity under which the Job Release Task runs. If omitted, the Task runs as a non-administrative user unique to the Task. + /// Keeps track of any properties unknown to the library. + internal BatchJobReleaseTask(string id, string commandLine, BatchTaskContainerSettings containerSettings, IList resourceFiles, IList environmentSettings, TimeSpan? maxWallClockTime, TimeSpan? retentionTime, UserIdentity userIdentity, IDictionary serializedAdditionalRawData) + { + Id = id; + CommandLine = commandLine; + ContainerSettings = containerSettings; + ResourceFiles = resourceFiles; + EnvironmentSettings = environmentSettings; + MaxWallClockTime = maxWallClockTime; + RetentionTime = retentionTime; + UserIdentity = userIdentity; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchJobReleaseTask() + { + } + + /// A string that uniquely identifies the Job Release Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot contain more than 64 characters. If you do not specify this property, the Batch service assigns a default value of 'jobrelease'. No other Task in the Job can have the same ID as the Job Release Task. If you try to submit a Task with the same id, the Batch service rejects the request with error code TaskIdSameAsJobReleaseTask; if you are calling the REST API directly, the HTTP status code is 409 (Conflict). + public string Id { get; set; } + /// The command line of the Job Release Task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + public string CommandLine { get; set; } + /// The settings for the container under which the Job Release Task runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. + public BatchTaskContainerSettings ContainerSettings { get; set; } + /// A list of files that the Batch service will download to the Compute Node before running the command line. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. Files listed under this element are located in the Task's working directory. + public IList ResourceFiles { get; } + /// A list of environment variable settings for the Job Release Task. 
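The list properties here are get-only ChangeTrackingLists, so they are populated by mutation rather than assignment. A sketch; EnvironmentSetting's constructor shape is assumed (required name, settable optional Value), since that type is not part of this excerpt:

    using Azure.Compute.Batch;

    var release = new BatchJobReleaseTask("/bin/sh -c 'cleanup.sh'");
    // Hypothetical EnvironmentSetting shape: name via the constructor, optional Value.
    release.EnvironmentSettings.Add(new EnvironmentSetting("CLEANUP_MODE") { Value = "full" });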
+ public IList EnvironmentSettings { get; } + /// The maximum elapsed time that the Job Release Task may run on a given Compute Node, measured from the time the Task starts. If the Task does not complete within the time limit, the Batch service terminates it. The default value is 15 minutes. You may not specify a timeout longer than 15 minutes. If you do, the Batch service rejects it with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + public TimeSpan? MaxWallClockTime { get; set; } + /// The minimum time to retain the Task directory for the Job Release Task on the Compute Node. After this time, the Batch service may delete the Task directory and all its contents. The default is 7 days, i.e. the Task directory will be retained for 7 days unless the Compute Node is removed or the Job is deleted. + public TimeSpan? RetentionTime { get; set; } + /// The user identity under which the Job Release Task runs. If omitted, the Task runs as a non-administrative user unique to the Task. + public UserIdentity UserIdentity { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobReleaseTaskExecutionInfo.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobReleaseTaskExecutionInfo.Serialization.cs new file mode 100644 index 0000000000000..af2ca9be37313 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobReleaseTaskExecutionInfo.Serialization.cs @@ -0,0 +1,250 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchJobReleaseTaskExecutionInfo : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobReleaseTaskExecutionInfo)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("startTime"u8); + writer.WriteStringValue(StartTime, "O"); + if (Optional.IsDefined(EndTime)) + { + writer.WritePropertyName("endTime"u8); + writer.WriteStringValue(EndTime.Value, "O"); + } + writer.WritePropertyName("state"u8); + writer.WriteStringValue(State.ToString()); + if (Optional.IsDefined(TaskRootDirectory)) + { + writer.WritePropertyName("taskRootDirectory"u8); + writer.WriteStringValue(TaskRootDirectory); + } + if (Optional.IsDefined(TaskRootDirectoryUrl)) + { + writer.WritePropertyName("taskRootDirectoryUrl"u8); + writer.WriteStringValue(TaskRootDirectoryUrl); + } + if (Optional.IsDefined(ExitCode)) + { + writer.WritePropertyName("exitCode"u8); + writer.WriteNumberValue(ExitCode.Value); + } + if (Optional.IsDefined(ContainerInfo)) + { + writer.WritePropertyName("containerInfo"u8); + writer.WriteObjectValue(ContainerInfo, options); + } + if (Optional.IsDefined(FailureInfo)) + { + writer.WritePropertyName("failureInfo"u8); + writer.WriteObjectValue(FailureInfo, options); + } + if (Optional.IsDefined(Result)) + { + writer.WritePropertyName("result"u8); + writer.WriteStringValue(Result.Value.ToString()); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchJobReleaseTaskExecutionInfo IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobReleaseTaskExecutionInfo)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchJobReleaseTaskExecutionInfo(document.RootElement, options); + } + + internal static BatchJobReleaseTaskExecutionInfo DeserializeBatchJobReleaseTaskExecutionInfo(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + DateTimeOffset startTime = default; + DateTimeOffset? endTime = default; + BatchJobReleaseTaskState state = default; + string taskRootDirectory = default; + string taskRootDirectoryUrl = default; + int? exitCode = default; + BatchTaskContainerExecutionInfo containerInfo = default; + BatchTaskFailureInfo failureInfo = default; + BatchTaskExecutionResult? 
result = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("startTime"u8)) + { + startTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("endTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + endTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("state"u8)) + { + state = new BatchJobReleaseTaskState(property.Value.GetString()); + continue; + } + if (property.NameEquals("taskRootDirectory"u8)) + { + taskRootDirectory = property.Value.GetString(); + continue; + } + if (property.NameEquals("taskRootDirectoryUrl"u8)) + { + taskRootDirectoryUrl = property.Value.GetString(); + continue; + } + if (property.NameEquals("exitCode"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + exitCode = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("containerInfo"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + containerInfo = BatchTaskContainerExecutionInfo.DeserializeBatchTaskContainerExecutionInfo(property.Value, options); + continue; + } + if (property.NameEquals("failureInfo"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + failureInfo = BatchTaskFailureInfo.DeserializeBatchTaskFailureInfo(property.Value, options); + continue; + } + if (property.NameEquals("result"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + result = new BatchTaskExecutionResult(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchJobReleaseTaskExecutionInfo( + startTime, + endTime, + state, + taskRootDirectory, + taskRootDirectoryUrl, + exitCode, + containerInfo, + failureInfo, + result, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchJobReleaseTaskExecutionInfo)} does not support writing '{options.Format}' format."); + } + } + + BatchJobReleaseTaskExecutionInfo IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchJobReleaseTaskExecutionInfo(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchJobReleaseTaskExecutionInfo)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. 
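The raw-data dictionary threaded through the reader above buys forward compatibility: properties the library does not model are captured on read and re-emitted on write. A sketch with a made-up extra field:

    using System;
    using System.ClientModel.Primitives;
    using Azure.Compute.Batch;

    // "futureField" is invented to stand in for a property added by a newer service version.
    BinaryData wire = BinaryData.FromString(
        "{\"startTime\":\"2024-06-01T10:00:00Z\",\"state\":\"completed\",\"futureField\":42}");
    var info = ModelReaderWriter.Read<BatchJobReleaseTaskExecutionInfo>(wire);
    Console.WriteLine(ModelReaderWriter.Write(info).ToString()); // futureField is preserved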
+ internal static BatchJobReleaseTaskExecutionInfo FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchJobReleaseTaskExecutionInfo(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobReleaseTaskExecutionInfo.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobReleaseTaskExecutionInfo.cs new file mode 100644 index 0000000000000..36edc24ff92a0 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobReleaseTaskExecutionInfo.cs @@ -0,0 +1,109 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// + /// Contains information about the execution of a Job Release Task on a Compute + /// Node. + /// + public partial class BatchJobReleaseTaskExecutionInfo + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The time at which the Task started running. If the Task has been restarted or retried, this is the most recent time at which the Task started running. + /// The current state of the Job Release Task on the Compute Node. + internal BatchJobReleaseTaskExecutionInfo(DateTimeOffset startTime, BatchJobReleaseTaskState state) + { + StartTime = startTime; + State = state; + } + + /// Initializes a new instance of . + /// The time at which the Task started running. If the Task has been restarted or retried, this is the most recent time at which the Task started running. + /// The time at which the Job Release Task completed. This property is set only if the Task is in the Completed state. + /// The current state of the Job Release Task on the Compute Node. + /// The root directory of the Job Release Task on the Compute Node. You can use this path to retrieve files created by the Task, such as log files. + /// The URL to the root directory of the Job Release Task on the Compute Node. + /// The exit code of the program specified on the Task command line. This parameter is returned only if the Task is in the completed state. The exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. Note that the exit code may also be generated by the Compute Node operating system, such as when a process is forcibly terminated. 
+ /// Information about the container under which the Task is executing. This property is set only if the Task runs in a container context. + /// Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. + /// The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. + /// Keeps track of any properties unknown to the library. + internal BatchJobReleaseTaskExecutionInfo(DateTimeOffset startTime, DateTimeOffset? endTime, BatchJobReleaseTaskState state, string taskRootDirectory, string taskRootDirectoryUrl, int? exitCode, BatchTaskContainerExecutionInfo containerInfo, BatchTaskFailureInfo failureInfo, BatchTaskExecutionResult? result, IDictionary serializedAdditionalRawData) + { + StartTime = startTime; + EndTime = endTime; + State = state; + TaskRootDirectory = taskRootDirectory; + TaskRootDirectoryUrl = taskRootDirectoryUrl; + ExitCode = exitCode; + ContainerInfo = containerInfo; + FailureInfo = failureInfo; + Result = result; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchJobReleaseTaskExecutionInfo() + { + } + + /// The time at which the Task started running. If the Task has been restarted or retried, this is the most recent time at which the Task started running. + public DateTimeOffset StartTime { get; } + /// The time at which the Job Release Task completed. This property is set only if the Task is in the Completed state. + public DateTimeOffset? EndTime { get; } + /// The current state of the Job Release Task on the Compute Node. + public BatchJobReleaseTaskState State { get; } + /// The root directory of the Job Release Task on the Compute Node. You can use this path to retrieve files created by the Task, such as log files. + public string TaskRootDirectory { get; } + /// The URL to the root directory of the Job Release Task on the Compute Node. + public string TaskRootDirectoryUrl { get; } + /// The exit code of the program specified on the Task command line. This parameter is returned only if the Task is in the completed state. The exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. Note that the exit code may also be generated by the Compute Node operating system, such as when a process is forcibly terminated. + public int? ExitCode { get; } + /// Information about the container under which the Task is executing. This property is set only if the Task runs in a container context. + public BatchTaskContainerExecutionInfo ContainerInfo { get; } + /// Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. + public BatchTaskFailureInfo FailureInfo { get; } + /// The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. + public BatchTaskExecutionResult? 
Result { get; }
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobReleaseTaskState.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobReleaseTaskState.cs
new file mode 100644
index 0000000000000..2f42e9b5744e9
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobReleaseTaskState.cs
@@ -0,0 +1,51 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.ComponentModel;
+
+namespace Azure.Compute.Batch
+{
+    /// BatchJobReleaseTaskState enums.
+    public readonly partial struct BatchJobReleaseTaskState : IEquatable<BatchJobReleaseTaskState>
+    {
+        private readonly string _value;
+
+        /// Initializes a new instance of .
+        /// is null.
+        public BatchJobReleaseTaskState(string value)
+        {
+            _value = value ?? throw new ArgumentNullException(nameof(value));
+        }
+
+        private const string RunningValue = "running";
+        private const string CompletedValue = "completed";
+
+        /// The Task is currently running (including retrying).
+        public static BatchJobReleaseTaskState Running { get; } = new BatchJobReleaseTaskState(RunningValue);
+        /// The Task has exited with exit code 0, or the Task has exhausted its retry limit, or the Batch service was unable to start the Task due to Task preparation errors (such as resource file download failures).
+        public static BatchJobReleaseTaskState Completed { get; } = new BatchJobReleaseTaskState(CompletedValue);
+        /// Determines if two values are the same.
+        public static bool operator ==(BatchJobReleaseTaskState left, BatchJobReleaseTaskState right) => left.Equals(right);
+        /// Determines if two values are not the same.
+        public static bool operator !=(BatchJobReleaseTaskState left, BatchJobReleaseTaskState right) => !left.Equals(right);
+        /// Converts a string to a .
+        public static implicit operator BatchJobReleaseTaskState(string value) => new BatchJobReleaseTaskState(value);
+
+        ///
+        [EditorBrowsable(EditorBrowsableState.Never)]
+        public override bool Equals(object obj) => obj is BatchJobReleaseTaskState other && Equals(other);
+        ///
+        public bool Equals(BatchJobReleaseTaskState other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase);
+
+        ///
+        [EditorBrowsable(EditorBrowsableState.Never)]
+        public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0;
+        ///
+        public override string ToString() => _value;
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSchedule.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSchedule.Serialization.cs
new file mode 100644
index 0000000000000..9ffbc11fee3dd
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSchedule.Serialization.cs
@@ -0,0 +1,355 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.ClientModel.Primitives;
+using System.Collections.Generic;
+using System.Text.Json;
+using Azure.Core;
+
+namespace Azure.Compute.Batch
+{
+    public partial class BatchJobSchedule : IUtf8JsonSerializable, IJsonModel<BatchJobSchedule>
+    {
+        void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel<BatchJobSchedule>)this).Write(writer, ModelSerializationExtensions.WireOptions);
+
+        void IJsonModel<BatchJobSchedule>.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ?
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobSchedule)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (options.Format != "W" && Optional.IsDefined(Id)) + { + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + } + if (options.Format != "W" && Optional.IsDefined(DisplayName)) + { + writer.WritePropertyName("displayName"u8); + writer.WriteStringValue(DisplayName); + } + if (options.Format != "W" && Optional.IsDefined(Url)) + { + writer.WritePropertyName("url"u8); + writer.WriteStringValue(Url); + } + if (options.Format != "W" && Optional.IsDefined(ETag)) + { + writer.WritePropertyName("eTag"u8); + writer.WriteStringValue(ETag); + } + if (options.Format != "W" && Optional.IsDefined(LastModified)) + { + writer.WritePropertyName("lastModified"u8); + writer.WriteStringValue(LastModified.Value, "O"); + } + if (options.Format != "W" && Optional.IsDefined(CreationTime)) + { + writer.WritePropertyName("creationTime"u8); + writer.WriteStringValue(CreationTime.Value, "O"); + } + if (options.Format != "W" && Optional.IsDefined(State)) + { + writer.WritePropertyName("state"u8); + writer.WriteStringValue(State.Value.ToString()); + } + if (options.Format != "W" && Optional.IsDefined(StateTransitionTime)) + { + writer.WritePropertyName("stateTransitionTime"u8); + writer.WriteStringValue(StateTransitionTime.Value, "O"); + } + if (options.Format != "W" && Optional.IsDefined(PreviousState)) + { + writer.WritePropertyName("previousState"u8); + writer.WriteStringValue(PreviousState.Value.ToString()); + } + if (options.Format != "W" && Optional.IsDefined(PreviousStateTransitionTime)) + { + writer.WritePropertyName("previousStateTransitionTime"u8); + writer.WriteStringValue(PreviousStateTransitionTime.Value, "O"); + } + if (Optional.IsDefined(Schedule)) + { + writer.WritePropertyName("schedule"u8); + writer.WriteObjectValue(Schedule, options); + } + writer.WritePropertyName("jobSpecification"u8); + writer.WriteObjectValue(JobSpecification, options); + if (options.Format != "W" && Optional.IsDefined(ExecutionInfo)) + { + writer.WritePropertyName("executionInfo"u8); + writer.WriteObjectValue(ExecutionInfo, options); + } + if (Optional.IsCollectionDefined(Metadata)) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartArray(); + foreach (var item in Metadata) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && Optional.IsDefined(Stats)) + { + writer.WritePropertyName("stats"u8); + writer.WriteObjectValue(Stats, options); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchJobSchedule IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobSchedule)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchJobSchedule(document.RootElement, options); + } + + internal static BatchJobSchedule DeserializeBatchJobSchedule(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + string displayName = default; + string url = default; + string eTag = default; + DateTimeOffset? lastModified = default; + DateTimeOffset? creationTime = default; + BatchJobScheduleState? state = default; + DateTimeOffset? stateTransitionTime = default; + BatchJobScheduleState? previousState = default; + DateTimeOffset? previousStateTransitionTime = default; + BatchJobScheduleConfiguration schedule = default; + BatchJobSpecification jobSpecification = default; + BatchJobScheduleExecutionInfo executionInfo = default; + IList metadata = default; + BatchJobScheduleStatistics stats = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("displayName"u8)) + { + displayName = property.Value.GetString(); + continue; + } + if (property.NameEquals("url"u8)) + { + url = property.Value.GetString(); + continue; + } + if (property.NameEquals("eTag"u8)) + { + eTag = property.Value.GetString(); + continue; + } + if (property.NameEquals("lastModified"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + lastModified = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("creationTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + creationTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("state"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + state = new BatchJobScheduleState(property.Value.GetString()); + continue; + } + if (property.NameEquals("stateTransitionTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + stateTransitionTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("previousState"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + previousState = new BatchJobScheduleState(property.Value.GetString()); + continue; + } + if (property.NameEquals("previousStateTransitionTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + previousStateTransitionTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("schedule"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + schedule = BatchJobScheduleConfiguration.DeserializeBatchJobScheduleConfiguration(property.Value, options); + continue; + } + if (property.NameEquals("jobSpecification"u8)) + { + jobSpecification = BatchJobSpecification.DeserializeBatchJobSpecification(property.Value, options); + continue; + } + if (property.NameEquals("executionInfo"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + 
continue; + } + executionInfo = BatchJobScheduleExecutionInfo.DeserializeBatchJobScheduleExecutionInfo(property.Value, options); + continue; + } + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(MetadataItem.DeserializeMetadataItem(item, options)); + } + metadata = array; + continue; + } + if (property.NameEquals("stats"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + stats = BatchJobScheduleStatistics.DeserializeBatchJobScheduleStatistics(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchJobSchedule( + id, + displayName, + url, + eTag, + lastModified, + creationTime, + state, + stateTransitionTime, + previousState, + previousStateTransitionTime, + schedule, + jobSpecification, + executionInfo, + metadata ?? new ChangeTrackingList(), + stats, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchJobSchedule)} does not support writing '{options.Format}' format."); + } + } + + BatchJobSchedule IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchJobSchedule(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchJobSchedule)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchJobSchedule FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchJobSchedule(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSchedule.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSchedule.cs new file mode 100644 index 0000000000000..9dffba45d20ba --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSchedule.cs @@ -0,0 +1,135 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// + /// A Job Schedule that allows recurring Jobs by specifying when to run Jobs and a + /// specification used to create each Job. + /// + public partial class BatchJobSchedule + { + /// + /// Keeps track of any properties unknown to the library. 
+        ///
+        /// To assign an object to the value of this property use .
+        ///
+        ///
+        /// To assign an already formatted json string to this property use .
+        ///
+        ///
+        /// Examples:
+        ///
+        ///
+        /// BinaryData.FromObjectAsJson("foo")
+        /// Creates a payload of "foo".
+        ///
+        ///
+        /// BinaryData.FromString("\"foo\"")
+        /// Creates a payload of "foo".
+        ///
+        ///
+        /// BinaryData.FromObjectAsJson(new { key = "value" })
+        /// Creates a payload of { "key": "value" }.
+        ///
+        ///
+        /// BinaryData.FromString("{\"key\": \"value\"}")
+        /// Creates a payload of { "key": "value" }.
+        ///
+        ///
+        ///
+        private IDictionary<string, BinaryData> _serializedAdditionalRawData;
+
+        /// Initializes a new instance of .
+        /// The details of the Jobs to be created on this schedule.
+        /// is null.
+        public BatchJobSchedule(BatchJobSpecification jobSpecification)
+        {
+            Argument.AssertNotNull(jobSpecification, nameof(jobSpecification));
+
+            JobSpecification = jobSpecification;
+            Metadata = new ChangeTrackingList<MetadataItem>();
+        }
+
+        /// Initializes a new instance of .
+        /// A string that uniquely identifies the schedule within the Account.
+        /// The display name for the schedule.
+        /// The URL of the Job Schedule.
+        /// The ETag of the Job Schedule. This is an opaque string. You can use it to detect whether the Job Schedule has changed between requests. In particular, you can pass the ETag with an Update Job Schedule request to specify that your changes should take effect only if nobody else has modified the schedule in the meantime.
+        /// The last modified time of the Job Schedule. This is the last time at which the schedule level data, such as the Job specification or recurrence information, changed. It does not factor in job-level changes such as new Jobs being created or Jobs changing state.
+        /// The creation time of the Job Schedule.
+        /// The current state of the Job Schedule.
+        /// The time at which the Job Schedule entered the current state.
+        /// The previous state of the Job Schedule. This property is not present if the Job Schedule is in its initial active state.
+        /// The time at which the Job Schedule entered its previous state. This property is not present if the Job Schedule is in its initial active state.
+        /// The schedule according to which Jobs will be created. All times are fixed respective to UTC and are not impacted by daylight saving time.
+        /// The details of the Jobs to be created on this schedule.
+        /// Information about Jobs that have been and will be run under this schedule.
+        /// A list of name-value pairs associated with the schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code.
+        /// The lifetime resource usage statistics for the Job Schedule. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes.
+        /// Keeps track of any properties unknown to the library.
+        internal BatchJobSchedule(string id, string displayName, string url, string eTag, DateTimeOffset? lastModified, DateTimeOffset? creationTime, BatchJobScheduleState? state, DateTimeOffset? stateTransitionTime, BatchJobScheduleState? previousState, DateTimeOffset? previousStateTransitionTime, BatchJobScheduleConfiguration schedule, BatchJobSpecification jobSpecification, BatchJobScheduleExecutionInfo executionInfo, IList<MetadataItem> metadata, BatchJobScheduleStatistics stats, IDictionary<string, BinaryData> serializedAdditionalRawData)
+        {
+            Id = id;
+            DisplayName = displayName;
+            Url = url;
+            ETag = eTag;
+            LastModified = lastModified;
+            CreationTime = creationTime;
+            State = state;
+            StateTransitionTime = stateTransitionTime;
+            PreviousState = previousState;
+            PreviousStateTransitionTime = previousStateTransitionTime;
+            Schedule = schedule;
+            JobSpecification = jobSpecification;
+            ExecutionInfo = executionInfo;
+            Metadata = metadata;
+            Stats = stats;
+            _serializedAdditionalRawData = serializedAdditionalRawData;
+        }
+
+        /// Initializes a new instance of for deserialization.
+        internal BatchJobSchedule()
+        {
+        }
+
+        /// A string that uniquely identifies the schedule within the Account.
+        public string Id { get; }
+        /// The display name for the schedule.
+        public string DisplayName { get; }
+        /// The URL of the Job Schedule.
+        public string Url { get; }
+        /// The ETag of the Job Schedule. This is an opaque string. You can use it to detect whether the Job Schedule has changed between requests. In particular, you can pass the ETag with an Update Job Schedule request to specify that your changes should take effect only if nobody else has modified the schedule in the meantime.
+        public string ETag { get; }
+        /// The last modified time of the Job Schedule. This is the last time at which the schedule level data, such as the Job specification or recurrence information, changed. It does not factor in job-level changes such as new Jobs being created or Jobs changing state.
+        public DateTimeOffset? LastModified { get; }
+        /// The creation time of the Job Schedule.
+        public DateTimeOffset? CreationTime { get; }
+        /// The current state of the Job Schedule.
+        public BatchJobScheduleState? State { get; }
+        /// The time at which the Job Schedule entered the current state.
+        public DateTimeOffset? StateTransitionTime { get; }
+        /// The previous state of the Job Schedule. This property is not present if the Job Schedule is in its initial active state.
+        public BatchJobScheduleState? PreviousState { get; }
+        /// The time at which the Job Schedule entered its previous state. This property is not present if the Job Schedule is in its initial active state.
+        public DateTimeOffset? PreviousStateTransitionTime { get; }
+        /// The schedule according to which Jobs will be created. All times are fixed respective to UTC and are not impacted by daylight saving time.
+        public BatchJobScheduleConfiguration Schedule { get; set; }
+        /// The details of the Jobs to be created on this schedule.
+        public BatchJobSpecification JobSpecification { get; set; }
+        /// Information about Jobs that have been and will be run under this schedule.
+        public BatchJobScheduleExecutionInfo ExecutionInfo { get; }
+        /// A list of name-value pairs associated with the schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code.
+        public IList<MetadataItem> Metadata { get; }
+        /// The lifetime resource usage statistics for the Job Schedule. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes.
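+        // Illustrative sketch, not generated code: the ETag above supports optimistic
+        // concurrency on updates. Assuming a hypothetical BatchClient named `client` and an
+        // existing schedule called "myschedule", a caller could read and inspect the
+        // properties surfaced by this model:
+        //
+        //     BatchJobSchedule schedule = client.GetJobSchedule("myschedule");
+        //     Console.WriteLine($"{schedule.Id}: {schedule.State} (ETag {schedule.ETag})");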
+ public BatchJobScheduleStatistics Stats { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleConfiguration.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleConfiguration.Serialization.cs new file mode 100644 index 0000000000000..9565ce4d9fb8f --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleConfiguration.Serialization.cs @@ -0,0 +1,187 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchJobScheduleConfiguration : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobScheduleConfiguration)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(DoNotRunUntil)) + { + writer.WritePropertyName("doNotRunUntil"u8); + writer.WriteStringValue(DoNotRunUntil.Value, "O"); + } + if (Optional.IsDefined(DoNotRunAfter)) + { + writer.WritePropertyName("doNotRunAfter"u8); + writer.WriteStringValue(DoNotRunAfter.Value, "O"); + } + if (Optional.IsDefined(StartWindow)) + { + writer.WritePropertyName("startWindow"u8); + writer.WriteStringValue(StartWindow.Value, "P"); + } + if (Optional.IsDefined(RecurrenceInterval)) + { + writer.WritePropertyName("recurrenceInterval"u8); + writer.WriteStringValue(RecurrenceInterval.Value, "P"); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchJobScheduleConfiguration IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobScheduleConfiguration)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchJobScheduleConfiguration(document.RootElement, options); + } + + internal static BatchJobScheduleConfiguration DeserializeBatchJobScheduleConfiguration(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + DateTimeOffset? doNotRunUntil = default; + DateTimeOffset? doNotRunAfter = default; + TimeSpan? startWindow = default; + TimeSpan? 
recurrenceInterval = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("doNotRunUntil"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + doNotRunUntil = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("doNotRunAfter"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + doNotRunAfter = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("startWindow"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + startWindow = property.Value.GetTimeSpan("P"); + continue; + } + if (property.NameEquals("recurrenceInterval"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + recurrenceInterval = property.Value.GetTimeSpan("P"); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchJobScheduleConfiguration(doNotRunUntil, doNotRunAfter, startWindow, recurrenceInterval, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchJobScheduleConfiguration)} does not support writing '{options.Format}' format."); + } + } + + BatchJobScheduleConfiguration IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchJobScheduleConfiguration(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchJobScheduleConfiguration)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchJobScheduleConfiguration FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchJobScheduleConfiguration(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleConfiguration.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleConfiguration.cs new file mode 100644 index 0000000000000..a3d035fef9de7 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleConfiguration.cs @@ -0,0 +1,80 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
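+// Illustrative sketch, not generated code: the settable properties defined below control
+// when Jobs are created under a schedule. For example, a daily recurrence with a one-hour
+// start window, deferred by an hour, could be configured as:
+//
+//     var config = new BatchJobScheduleConfiguration
+//     {
+//         DoNotRunUntil = DateTimeOffset.UtcNow.AddHours(1),
+//         RecurrenceInterval = TimeSpan.FromDays(1),
+//         StartWindow = TimeSpan.FromHours(1),
+//     };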
+ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// + /// The schedule according to which Jobs will be created. All times are fixed + /// respective to UTC and are not impacted by daylight saving time. + /// + public partial class BatchJobScheduleConfiguration + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public BatchJobScheduleConfiguration() + { + } + + /// Initializes a new instance of . + /// The earliest time at which any Job may be created under this Job Schedule. If you do not specify a doNotRunUntil time, the schedule becomes ready to create Jobs immediately. + /// A time after which no Job will be created under this Job Schedule. The schedule will move to the completed state as soon as this deadline is past and there is no active Job under this Job Schedule. If you do not specify a doNotRunAfter time, and you are creating a recurring Job Schedule, the Job Schedule will remain active until you explicitly terminate it. + /// The time interval, starting from the time at which the schedule indicates a Job should be created, within which a Job must be created. If a Job is not created within the startWindow interval, then the 'opportunity' is lost; no Job will be created until the next recurrence of the schedule. If the schedule is recurring, and the startWindow is longer than the recurrence interval, then this is equivalent to an infinite startWindow, because the Job that is 'due' in one recurrenceInterval is not carried forward into the next recurrence interval. The default is infinite. The minimum value is 1 minute. If you specify a lower value, the Batch service rejects the schedule with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + /// The time interval between the start times of two successive Jobs under the Job Schedule. A Job Schedule can have at most one active Job under it at any given time. Because a Job Schedule can have at most one active Job under it at any given time, if it is time to create a new Job under a Job Schedule, but the previous Job is still running, the Batch service will not create the new Job until the previous Job finishes. If the previous Job does not finish within the startWindow period of the new recurrenceInterval, then no new Job will be scheduled for that interval. For recurring Jobs, you should normally specify a jobManagerTask in the jobSpecification. If you do not use jobManagerTask, you will need an external process to monitor when Jobs are created, add Tasks to the Jobs and terminate the Jobs ready for the next recurrence. The default is that the schedule does not recur: one Job is created, within the startWindow after the doNotRunUntil time, and the schedule is complete as soon as that Job finishes. 
The minimum value is 1 minute. If you specify a lower value, the Batch service rejects the schedule with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + /// Keeps track of any properties unknown to the library. + internal BatchJobScheduleConfiguration(DateTimeOffset? doNotRunUntil, DateTimeOffset? doNotRunAfter, TimeSpan? startWindow, TimeSpan? recurrenceInterval, IDictionary serializedAdditionalRawData) + { + DoNotRunUntil = doNotRunUntil; + DoNotRunAfter = doNotRunAfter; + StartWindow = startWindow; + RecurrenceInterval = recurrenceInterval; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The earliest time at which any Job may be created under this Job Schedule. If you do not specify a doNotRunUntil time, the schedule becomes ready to create Jobs immediately. + public DateTimeOffset? DoNotRunUntil { get; set; } + /// A time after which no Job will be created under this Job Schedule. The schedule will move to the completed state as soon as this deadline is past and there is no active Job under this Job Schedule. If you do not specify a doNotRunAfter time, and you are creating a recurring Job Schedule, the Job Schedule will remain active until you explicitly terminate it. + public DateTimeOffset? DoNotRunAfter { get; set; } + /// The time interval, starting from the time at which the schedule indicates a Job should be created, within which a Job must be created. If a Job is not created within the startWindow interval, then the 'opportunity' is lost; no Job will be created until the next recurrence of the schedule. If the schedule is recurring, and the startWindow is longer than the recurrence interval, then this is equivalent to an infinite startWindow, because the Job that is 'due' in one recurrenceInterval is not carried forward into the next recurrence interval. The default is infinite. The minimum value is 1 minute. If you specify a lower value, the Batch service rejects the schedule with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + public TimeSpan? StartWindow { get; set; } + /// The time interval between the start times of two successive Jobs under the Job Schedule. A Job Schedule can have at most one active Job under it at any given time. Because a Job Schedule can have at most one active Job under it at any given time, if it is time to create a new Job under a Job Schedule, but the previous Job is still running, the Batch service will not create the new Job until the previous Job finishes. If the previous Job does not finish within the startWindow period of the new recurrenceInterval, then no new Job will be scheduled for that interval. For recurring Jobs, you should normally specify a jobManagerTask in the jobSpecification. If you do not use jobManagerTask, you will need an external process to monitor when Jobs are created, add Tasks to the Jobs and terminate the Jobs ready for the next recurrence. The default is that the schedule does not recur: one Job is created, within the startWindow after the doNotRunUntil time, and the schedule is complete as soon as that Job finishes. The minimum value is 1 minute. If you specify a lower value, the Batch service rejects the schedule with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + public TimeSpan? 
RecurrenceInterval { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleCreateContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleCreateContent.Serialization.cs new file mode 100644 index 0000000000000..c92614a3fc9f3 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleCreateContent.Serialization.cs @@ -0,0 +1,193 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchJobScheduleCreateContent : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobScheduleCreateContent)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + if (Optional.IsDefined(DisplayName)) + { + writer.WritePropertyName("displayName"u8); + writer.WriteStringValue(DisplayName); + } + writer.WritePropertyName("schedule"u8); + writer.WriteObjectValue(Schedule, options); + writer.WritePropertyName("jobSpecification"u8); + writer.WriteObjectValue(JobSpecification, options); + if (Optional.IsCollectionDefined(Metadata)) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartArray(); + foreach (var item in Metadata) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchJobScheduleCreateContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobScheduleCreateContent)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchJobScheduleCreateContent(document.RootElement, options); + } + + internal static BatchJobScheduleCreateContent DeserializeBatchJobScheduleCreateContent(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + string displayName = default; + BatchJobScheduleConfiguration schedule = default; + BatchJobSpecification jobSpecification = default; + IList metadata = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("displayName"u8)) + { + displayName = property.Value.GetString(); + continue; + } + if (property.NameEquals("schedule"u8)) + { + schedule = BatchJobScheduleConfiguration.DeserializeBatchJobScheduleConfiguration(property.Value, options); + continue; + } + if (property.NameEquals("jobSpecification"u8)) + { + jobSpecification = BatchJobSpecification.DeserializeBatchJobSpecification(property.Value, options); + continue; + } + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(MetadataItem.DeserializeMetadataItem(item, options)); + } + metadata = array; + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchJobScheduleCreateContent( + id, + displayName, + schedule, + jobSpecification, + metadata ?? new ChangeTrackingList(), + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchJobScheduleCreateContent)} does not support writing '{options.Format}' format."); + } + } + + BatchJobScheduleCreateContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchJobScheduleCreateContent(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchJobScheduleCreateContent)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. 
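+        // Illustrative sketch, not generated code: ToRequestContent below is how the client
+        // pipeline turns a populated create model into an HTTP request body. Assuming a
+        // hypothetical `jobSpecification` the caller has already built:
+        //
+        //     var create = new BatchJobScheduleCreateContent(
+        //         "myschedule",
+        //         new BatchJobScheduleConfiguration { RecurrenceInterval = TimeSpan.FromDays(1) },
+        //         jobSpecification);
+        //     RequestContent body = create.ToRequestContent();   // serialized with the wire ("W") options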
+ internal static BatchJobScheduleCreateContent FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchJobScheduleCreateContent(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleCreateContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleCreateContent.cs new file mode 100644 index 0000000000000..624f65419a5e3 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleCreateContent.cs @@ -0,0 +1,98 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Parameters for creating an Azure Batch Job Schedule. + public partial class BatchJobScheduleCreateContent + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// A string that uniquely identifies the schedule within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). + /// The schedule according to which Jobs will be created. All times are fixed respective to UTC and are not impacted by daylight saving time. + /// The details of the Jobs to be created on this schedule. + /// , or is null. + public BatchJobScheduleCreateContent(string id, BatchJobScheduleConfiguration schedule, BatchJobSpecification jobSpecification) + { + Argument.AssertNotNull(id, nameof(id)); + Argument.AssertNotNull(schedule, nameof(schedule)); + Argument.AssertNotNull(jobSpecification, nameof(jobSpecification)); + + Id = id; + Schedule = schedule; + JobSpecification = jobSpecification; + Metadata = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// A string that uniquely identifies the schedule within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). + /// The display name for the schedule. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. + /// The schedule according to which Jobs will be created. All times are fixed respective to UTC and are not impacted by daylight saving time. 
+ /// The details of the Jobs to be created on this schedule. + /// A list of name-value pairs associated with the schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. + /// Keeps track of any properties unknown to the library. + internal BatchJobScheduleCreateContent(string id, string displayName, BatchJobScheduleConfiguration schedule, BatchJobSpecification jobSpecification, IList metadata, IDictionary serializedAdditionalRawData) + { + Id = id; + DisplayName = displayName; + Schedule = schedule; + JobSpecification = jobSpecification; + Metadata = metadata; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchJobScheduleCreateContent() + { + } + + /// A string that uniquely identifies the schedule within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). + public string Id { get; } + /// The display name for the schedule. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. + public string DisplayName { get; set; } + /// The schedule according to which Jobs will be created. All times are fixed respective to UTC and are not impacted by daylight saving time. + public BatchJobScheduleConfiguration Schedule { get; } + /// The details of the Jobs to be created on this schedule. + public BatchJobSpecification JobSpecification { get; } + /// A list of name-value pairs associated with the schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. + public IList Metadata { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleExecutionInfo.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleExecutionInfo.Serialization.cs new file mode 100644 index 0000000000000..e467ae34e11d9 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleExecutionInfo.Serialization.cs @@ -0,0 +1,172 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchJobScheduleExecutionInfo : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobScheduleExecutionInfo)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(NextRunTime)) + { + writer.WritePropertyName("nextRunTime"u8); + writer.WriteStringValue(NextRunTime.Value, "O"); + } + if (Optional.IsDefined(RecentJob)) + { + writer.WritePropertyName("recentJob"u8); + writer.WriteObjectValue(RecentJob, options); + } + if (Optional.IsDefined(EndTime)) + { + writer.WritePropertyName("endTime"u8); + writer.WriteStringValue(EndTime.Value, "O"); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchJobScheduleExecutionInfo IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobScheduleExecutionInfo)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchJobScheduleExecutionInfo(document.RootElement, options); + } + + internal static BatchJobScheduleExecutionInfo DeserializeBatchJobScheduleExecutionInfo(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + DateTimeOffset? nextRunTime = default; + RecentBatchJob recentJob = default; + DateTimeOffset? endTime = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("nextRunTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + nextRunTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("recentJob"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + recentJob = RecentBatchJob.DeserializeRecentBatchJob(property.Value, options); + continue; + } + if (property.NameEquals("endTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + endTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchJobScheduleExecutionInfo(nextRunTime, recentJob, endTime, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchJobScheduleExecutionInfo)} does not support writing '{options.Format}' format."); + } + } + + BatchJobScheduleExecutionInfo IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchJobScheduleExecutionInfo(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchJobScheduleExecutionInfo)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchJobScheduleExecutionInfo FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchJobScheduleExecutionInfo(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleExecutionInfo.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleExecutionInfo.cs new file mode 100644 index 0000000000000..312429c1b8d33 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleExecutionInfo.cs @@ -0,0 +1,76 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// + /// Contains information about Jobs that have been and will be run under a Job + /// Schedule. + /// + public partial class BatchJobScheduleExecutionInfo + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public BatchJobScheduleExecutionInfo() + { + } + + /// Initializes a new instance of . + /// The next time at which a Job will be created under this schedule. This property is meaningful only if the schedule is in the active state when the time comes around. For example, if the schedule is disabled, no Job will be created at nextRunTime unless the Job is enabled before then. + /// Information about the most recent Job under the Job Schedule. This property is present only if the at least one Job has run under the schedule. 
+ /// The time at which the schedule ended. This property is set only if the Job Schedule is in the completed state. + /// Keeps track of any properties unknown to the library. + internal BatchJobScheduleExecutionInfo(DateTimeOffset? nextRunTime, RecentBatchJob recentJob, DateTimeOffset? endTime, IDictionary serializedAdditionalRawData) + { + NextRunTime = nextRunTime; + RecentJob = recentJob; + EndTime = endTime; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The next time at which a Job will be created under this schedule. This property is meaningful only if the schedule is in the active state when the time comes around. For example, if the schedule is disabled, no Job will be created at nextRunTime unless the Job is enabled before then. + public DateTimeOffset? NextRunTime { get; set; } + /// Information about the most recent Job under the Job Schedule. This property is present only if at least one Job has run under the schedule. + public RecentBatchJob RecentJob { get; set; } + /// The time at which the schedule ended. This property is set only if the Job Schedule is in the completed state. + public DateTimeOffset? EndTime { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleState.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleState.cs new file mode 100644 index 0000000000000..9ca8728cd1146 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleState.cs @@ -0,0 +1,60 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// BatchJobScheduleState enums. + public readonly partial struct BatchJobScheduleState : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public BatchJobScheduleState(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ActiveValue = "active"; + private const string CompletedValue = "completed"; + private const string DisabledValue = "disabled"; + private const string TerminatingValue = "terminating"; + private const string DeletingValue = "deleting"; + + /// The Job Schedule is active and will create Jobs as per its schedule. + public static BatchJobScheduleState Active { get; } = new BatchJobScheduleState(ActiveValue); + /// The Job Schedule has terminated, either by reaching its end time or by the user terminating it explicitly. + public static BatchJobScheduleState Completed { get; } = new BatchJobScheduleState(CompletedValue); + /// The user has disabled the Job Schedule. The scheduler will not initiate any new Jobs on this schedule, but any existing active Job will continue to run. + public static BatchJobScheduleState Disabled { get; } = new BatchJobScheduleState(DisabledValue); + /// The Job Schedule has no more work to do, or has been explicitly terminated by the user, but the termination operation is still in progress. The scheduler will not initiate any new Jobs for this Job Schedule, nor is any existing Job active. + public static BatchJobScheduleState Terminating { get; } = new BatchJobScheduleState(TerminatingValue); + /// The user has requested that the Job Schedule be deleted, but the delete operation is still in progress.
The scheduler will not initiate any new Jobs for this Job Schedule, and will delete any existing Jobs and Tasks under the Job Schedule, including any active Job. The Job Schedule will be deleted when all Jobs and Tasks under the Job Schedule have been deleted. + public static BatchJobScheduleState Deleting { get; } = new BatchJobScheduleState(DeletingValue); + /// Determines if two values are the same. + public static bool operator ==(BatchJobScheduleState left, BatchJobScheduleState right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(BatchJobScheduleState left, BatchJobScheduleState right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator BatchJobScheduleState(string value) => new BatchJobScheduleState(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is BatchJobScheduleState other && Equals(other); + /// + public bool Equals(BatchJobScheduleState other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleStatistics.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleStatistics.Serialization.cs new file mode 100644 index 0000000000000..c5a3c95add21f --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleStatistics.Serialization.cs @@ -0,0 +1,254 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchJobScheduleStatistics : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobScheduleStatistics)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("url"u8); + writer.WriteStringValue(Url); + writer.WritePropertyName("startTime"u8); + writer.WriteStringValue(StartTime, "O"); + writer.WritePropertyName("lastUpdateTime"u8); + writer.WriteStringValue(LastUpdateTime, "O"); + writer.WritePropertyName("userCPUTime"u8); + writer.WriteStringValue(UserCpuTime, "P"); + writer.WritePropertyName("kernelCPUTime"u8); + writer.WriteStringValue(KernelCpuTime, "P"); + writer.WritePropertyName("wallClockTime"u8); + writer.WriteStringValue(WallClockTime, "P"); + writer.WritePropertyName("readIOps"u8); + writer.WriteNumberValue(ReadIOps); + writer.WritePropertyName("writeIOps"u8); + writer.WriteNumberValue(WriteIOps); + writer.WritePropertyName("readIOGiB"u8); + writer.WriteNumberValue(ReadIOGiB); + writer.WritePropertyName("writeIOGiB"u8); + writer.WriteNumberValue(WriteIOGiB); + writer.WritePropertyName("numSucceededTasks"u8); + writer.WriteNumberValue(NumSucceededTasks); + writer.WritePropertyName("numFailedTasks"u8); + writer.WriteNumberValue(NumFailedTasks); + writer.WritePropertyName("numTaskRetries"u8); + writer.WriteNumberValue(NumTaskRetries); + writer.WritePropertyName("waitTime"u8); + writer.WriteStringValue(WaitTime, "P"); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchJobScheduleStatistics IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobScheduleStatistics)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchJobScheduleStatistics(document.RootElement, options); + } + + internal static BatchJobScheduleStatistics DeserializeBatchJobScheduleStatistics(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string url = default; + DateTimeOffset startTime = default; + DateTimeOffset lastUpdateTime = default; + TimeSpan userCPUTime = default; + TimeSpan kernelCPUTime = default; + TimeSpan wallClockTime = default; + long readIOps = default; + long writeIOps = default; + float readIOGiB = default; + float writeIOGiB = default; + long numSucceededTasks = default; + long numFailedTasks = default; + long numTaskRetries = default; + TimeSpan waitTime = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("url"u8)) + { + url = property.Value.GetString(); + continue; + } + if (property.NameEquals("startTime"u8)) + { + startTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("lastUpdateTime"u8)) + { + lastUpdateTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("userCPUTime"u8)) + { + userCPUTime = property.Value.GetTimeSpan("P"); + continue; + } + if (property.NameEquals("kernelCPUTime"u8)) + { + kernelCPUTime = property.Value.GetTimeSpan("P"); + continue; + } + if (property.NameEquals("wallClockTime"u8)) + { + wallClockTime = property.Value.GetTimeSpan("P"); + continue; + } + if (property.NameEquals("readIOps"u8)) + { + readIOps = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("writeIOps"u8)) + { + writeIOps = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("readIOGiB"u8)) + { + readIOGiB = property.Value.GetSingle(); + continue; + } + if (property.NameEquals("writeIOGiB"u8)) + { + writeIOGiB = property.Value.GetSingle(); + continue; + } + if (property.NameEquals("numSucceededTasks"u8)) + { + numSucceededTasks = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("numFailedTasks"u8)) + { + numFailedTasks = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("numTaskRetries"u8)) + { + numTaskRetries = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("waitTime"u8)) + { + waitTime = property.Value.GetTimeSpan("P"); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchJobScheduleStatistics( + url, + startTime, + lastUpdateTime, + userCPUTime, + kernelCPUTime, + wallClockTime, + readIOps, + writeIOps, + readIOGiB, + writeIOGiB, + numSucceededTasks, + numFailedTasks, + numTaskRetries, + waitTime, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchJobScheduleStatistics)} does not support writing '{options.Format}' format."); + } + } + + BatchJobScheduleStatistics IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchJobScheduleStatistics(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchJobScheduleStatistics)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchJobScheduleStatistics FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchJobScheduleStatistics(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleStatistics.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleStatistics.cs new file mode 100644 index 0000000000000..5b3dd8aa78397 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleStatistics.cs @@ -0,0 +1,153 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Resource usage statistics for a Job Schedule. + public partial class BatchJobScheduleStatistics + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The URL of the statistics. + /// The start time of the time range covered by the statistics. + /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. + /// The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in all Jobs created under the schedule. + /// The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in all Jobs created under the schedule. + /// The total wall clock time of all the Tasks in all the Jobs created under the schedule. 
The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If a Task was retried, this includes the wall clock time of all the Task retries. + /// The total number of disk read operations made by all Tasks in all Jobs created under the schedule. + /// The total number of disk write operations made by all Tasks in all Jobs created under the schedule. + /// The total gibibytes read from disk by all Tasks in all Jobs created under the schedule. + /// The total gibibytes written to disk by all Tasks in all Jobs created under the schedule. + /// The total number of Tasks successfully completed during the given time range in Jobs created under the schedule. A Task completes successfully if it returns exit code 0. + /// The total number of Tasks that failed during the given time range in Jobs created under the schedule. A Task fails if it exhausts its maximum retry count without returning exit code 0. + /// The total number of retries during the given time range on all Tasks in all Jobs created under the schedule. + /// The total wait time of all Tasks in all Jobs created under the schedule. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.) This value is only reported in the Account lifetime statistics; it is not included in the Job statistics. + /// is null. + public BatchJobScheduleStatistics(string url, DateTimeOffset startTime, DateTimeOffset lastUpdateTime, TimeSpan userCpuTime, TimeSpan kernelCpuTime, TimeSpan wallClockTime, long readIOps, long writeIOps, float readIOGiB, float writeIOGiB, long numSucceededTasks, long numFailedTasks, long numTaskRetries, TimeSpan waitTime) + { + Argument.AssertNotNull(url, nameof(url)); + + Url = url; + StartTime = startTime; + LastUpdateTime = lastUpdateTime; + UserCpuTime = userCpuTime; + KernelCpuTime = kernelCpuTime; + WallClockTime = wallClockTime; + ReadIOps = readIOps; + WriteIOps = writeIOps; + ReadIOGiB = readIOGiB; + WriteIOGiB = writeIOGiB; + NumSucceededTasks = numSucceededTasks; + NumFailedTasks = numFailedTasks; + NumTaskRetries = numTaskRetries; + WaitTime = waitTime; + } + + /// Initializes a new instance of . + /// The URL of the statistics. + /// The start time of the time range covered by the statistics. + /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. + /// The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in all Jobs created under the schedule. + /// The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in all Jobs created under the schedule. + /// The total wall clock time of all the Tasks in all the Jobs created under the schedule. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If a Task was retried, this includes the wall clock time of all the Task retries. + /// The total number of disk read operations made by all Tasks in all Jobs created under the schedule. + /// The total number of disk write operations made by all Tasks in all Jobs created under the schedule.
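+ // Note: the companion serializer above writes DateTimeOffset values with the ISO 8601
+ // round-trip ("O") format and TimeSpan values with the ISO 8601 duration ("P") format,
+ // so a 90-minute userCpuTime, for example, is serialized as "PT1H30M".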
+ /// The total gibibytes read from disk by all Tasks in all Jobs created under the schedule. + /// The total gibibytes written to disk by all Tasks in all Jobs created under the schedule. + /// The total number of Tasks successfully completed during the given time range in Jobs created under the schedule. A Task completes successfully if it returns exit code 0. + /// The total number of Tasks that failed during the given time range in Jobs created under the schedule. A Task fails if it exhausts its maximum retry count without returning exit code 0. + /// The total number of retries during the given time range on all Tasks in all Jobs created under the schedule. + /// The total wait time of all Tasks in all Jobs created under the schedule. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.) This value is only reported in the Account lifetime statistics; it is not included in the Job statistics. + /// Keeps track of any properties unknown to the library. + internal BatchJobScheduleStatistics(string url, DateTimeOffset startTime, DateTimeOffset lastUpdateTime, TimeSpan userCpuTime, TimeSpan kernelCpuTime, TimeSpan wallClockTime, long readIOps, long writeIOps, float readIOGiB, float writeIOGiB, long numSucceededTasks, long numFailedTasks, long numTaskRetries, TimeSpan waitTime, IDictionary serializedAdditionalRawData) + { + Url = url; + StartTime = startTime; + LastUpdateTime = lastUpdateTime; + UserCpuTime = userCpuTime; + KernelCpuTime = kernelCpuTime; + WallClockTime = wallClockTime; + ReadIOps = readIOps; + WriteIOps = writeIOps; + ReadIOGiB = readIOGiB; + WriteIOGiB = writeIOGiB; + NumSucceededTasks = numSucceededTasks; + NumFailedTasks = numFailedTasks; + NumTaskRetries = numTaskRetries; + WaitTime = waitTime; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchJobScheduleStatistics() + { + } + + /// The URL of the statistics. + public string Url { get; set; } + /// The start time of the time range covered by the statistics. + public DateTimeOffset StartTime { get; set; } + /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. + public DateTimeOffset LastUpdateTime { get; set; } + /// The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in all Jobs created under the schedule. + public TimeSpan UserCpuTime { get; set; } + /// The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in all Jobs created under the schedule. + public TimeSpan KernelCpuTime { get; set; } + /// The total wall clock time of all the Tasks in all the Jobs created under the schedule. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If a Task was retried, this includes the wall clock time of all the Task retries. + public TimeSpan WallClockTime { get; set; } + /// The total number of disk read operations made by all Tasks in all Jobs created under the schedule. + public long ReadIOps { get; set; } + /// The total number of disk write operations made by all Tasks in all Jobs created under the schedule.
+ public long WriteIOps { get; set; } + /// The total gibibytes read from disk by all Tasks in all Jobs created under the schedule. + public float ReadIOGiB { get; set; } + /// The total gibibytes written to disk by all Tasks in all Jobs created under the schedule. + public float WriteIOGiB { get; set; } + /// The total number of Tasks successfully completed during the given time range in Jobs created under the schedule. A Task completes successfully if it returns exit code 0. + public long NumSucceededTasks { get; set; } + /// The total number of Tasks that failed during the given time range in Jobs created under the schedule. A Task fails if it exhausts its maximum retry count without returning exit code 0. + public long NumFailedTasks { get; set; } + /// The total number of retries during the given time range on all Tasks in all Jobs created under the schedule. + public long NumTaskRetries { get; set; } + /// The total wait time of all Tasks in all Jobs created under the schedule. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.) This value is only reported in the Account lifetime statistics; it is not included in the Job statistics. + public TimeSpan WaitTime { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleUpdateContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleUpdateContent.Serialization.cs new file mode 100644 index 0000000000000..eeea81dd97f28 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleUpdateContent.Serialization.cs @@ -0,0 +1,182 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchJobScheduleUpdateContent : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ?
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobScheduleUpdateContent)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(Schedule)) + { + writer.WritePropertyName("schedule"u8); + writer.WriteObjectValue(Schedule, options); + } + if (Optional.IsDefined(JobSpecification)) + { + writer.WritePropertyName("jobSpecification"u8); + writer.WriteObjectValue(JobSpecification, options); + } + if (Optional.IsCollectionDefined(Metadata)) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartArray(); + foreach (var item in Metadata) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchJobScheduleUpdateContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobScheduleUpdateContent)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchJobScheduleUpdateContent(document.RootElement, options); + } + + internal static BatchJobScheduleUpdateContent DeserializeBatchJobScheduleUpdateContent(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + BatchJobScheduleConfiguration schedule = default; + BatchJobSpecification jobSpecification = default; + IList metadata = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("schedule"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + schedule = BatchJobScheduleConfiguration.DeserializeBatchJobScheduleConfiguration(property.Value, options); + continue; + } + if (property.NameEquals("jobSpecification"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + jobSpecification = BatchJobSpecification.DeserializeBatchJobSpecification(property.Value, options); + continue; + } + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(MetadataItem.DeserializeMetadataItem(item, options)); + } + metadata = array; + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchJobScheduleUpdateContent(schedule, jobSpecification, metadata ?? 
new ChangeTrackingList(), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchJobScheduleUpdateContent)} does not support writing '{options.Format}' format."); + } + } + + BatchJobScheduleUpdateContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchJobScheduleUpdateContent(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchJobScheduleUpdateContent)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchJobScheduleUpdateContent FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchJobScheduleUpdateContent(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleUpdateContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleUpdateContent.cs new file mode 100644 index 0000000000000..6136aa6bbdc4f --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleUpdateContent.cs @@ -0,0 +1,74 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Parameters for updating an Azure Batch Job Schedule. + public partial class BatchJobScheduleUpdateContent + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public BatchJobScheduleUpdateContent() + { + Metadata = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// The schedule according to which Jobs will be created. All times are fixed respective to UTC and are not impacted by daylight saving time. If you do not specify this element, the existing schedule is left unchanged. + /// The details of the Jobs to be created on this schedule. 
Updates affect only Jobs that are started after the update has taken place. Any currently active Job continues with the older specification. + /// A list of name-value pairs associated with the Job Schedule as metadata. If you do not specify this element, existing metadata is left unchanged. + /// Keeps track of any properties unknown to the library. + internal BatchJobScheduleUpdateContent(BatchJobScheduleConfiguration schedule, BatchJobSpecification jobSpecification, IList metadata, IDictionary serializedAdditionalRawData) + { + Schedule = schedule; + JobSpecification = jobSpecification; + Metadata = metadata; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The schedule according to which Jobs will be created. All times are fixed respective to UTC and are not impacted by daylight saving time. If you do not specify this element, the existing schedule is left unchanged. + public BatchJobScheduleConfiguration Schedule { get; set; } + /// The details of the Jobs to be created on this schedule. Updates affect only Jobs that are started after the update has taken place. Any currently active Job continues with the older specification. + public BatchJobSpecification JobSpecification { get; set; } + /// A list of name-value pairs associated with the Job Schedule as metadata. If you do not specify this element, existing metadata is left unchanged. + public IList Metadata { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSchedulingError.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSchedulingError.Serialization.cs new file mode 100644 index 0000000000000..8e840beba7884 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSchedulingError.Serialization.cs @@ -0,0 +1,182 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchJobSchedulingError : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobSchedulingError)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("category"u8); + writer.WriteStringValue(Category.ToString()); + if (Optional.IsDefined(Code)) + { + writer.WritePropertyName("code"u8); + writer.WriteStringValue(Code); + } + if (Optional.IsDefined(Message)) + { + writer.WritePropertyName("message"u8); + writer.WriteStringValue(Message); + } + if (Optional.IsCollectionDefined(Details)) + { + writer.WritePropertyName("details"u8); + writer.WriteStartArray(); + foreach (var item in Details) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchJobSchedulingError IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobSchedulingError)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchJobSchedulingError(document.RootElement, options); + } + + internal static BatchJobSchedulingError DeserializeBatchJobSchedulingError(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + ErrorCategory category = default; + string code = default; + string message = default; + IList details = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("category"u8)) + { + category = new ErrorCategory(property.Value.GetString()); + continue; + } + if (property.NameEquals("code"u8)) + { + code = property.Value.GetString(); + continue; + } + if (property.NameEquals("message"u8)) + { + message = property.Value.GetString(); + continue; + } + if (property.NameEquals("details"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(NameValuePair.DeserializeNameValuePair(item, options)); + } + details = array; + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchJobSchedulingError(category, code, message, details ?? new ChangeTrackingList(), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchJobSchedulingError)} does not support writing '{options.Format}' format."); + } + } + + BatchJobSchedulingError IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchJobSchedulingError(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchJobSchedulingError)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchJobSchedulingError FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchJobSchedulingError(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSchedulingError.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSchedulingError.cs new file mode 100644 index 0000000000000..46fee6e6a60f7 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSchedulingError.cs @@ -0,0 +1,85 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// An error encountered by the Batch service when scheduling a Job. + public partial class BatchJobSchedulingError + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The category of the Job scheduling error. + public BatchJobSchedulingError(ErrorCategory category) + { + Category = category; + Details = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// The category of the Job scheduling error. + /// An identifier for the Job scheduling error. Codes are invariant and are intended to be consumed programmatically. + /// A message describing the Job scheduling error, intended to be suitable for display in a user interface. + /// A list of additional error details related to the scheduling error. + /// Keeps track of any properties unknown to the library. 
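+ // A minimal round-trip sketch using System.ClientModel's ModelReaderWriter; the JSON values
+ // below are illustrative and "futureProperty" is a hypothetical unknown field:
+ //   var options = new ModelReaderWriterOptions("J");
+ //   var error = ModelReaderWriter.Read<BatchJobSchedulingError>(
+ //       BinaryData.FromString("{\"category\":\"usererror\",\"futureProperty\":1}"), options);
+ //   BinaryData json = ModelReaderWriter.Write(error, options); // "futureProperty" is preserved via _serializedAdditionalRawData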
+ internal BatchJobSchedulingError(ErrorCategory category, string code, string message, IList details, IDictionary serializedAdditionalRawData) + { + Category = category; + Code = code; + Message = message; + Details = details; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchJobSchedulingError() + { + } + + /// The category of the Job scheduling error. + public ErrorCategory Category { get; set; } + /// An identifier for the Job scheduling error. Codes are invariant and are intended to be consumed programmatically. + public string Code { get; set; } + /// A message describing the Job scheduling error, intended to be suitable for display in a user interface. + public string Message { get; set; } + /// A list of additional error details related to the scheduling error. + public IList Details { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSpecification.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSpecification.Serialization.cs new file mode 100644 index 0000000000000..c55ac93bd1225 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSpecification.Serialization.cs @@ -0,0 +1,377 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchJobSpecification : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobSpecification)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(Priority)) + { + writer.WritePropertyName("priority"u8); + writer.WriteNumberValue(Priority.Value); + } + if (Optional.IsDefined(AllowTaskPreemption)) + { + writer.WritePropertyName("allowTaskPreemption"u8); + writer.WriteBooleanValue(AllowTaskPreemption.Value); + } + if (Optional.IsDefined(MaxParallelTasks)) + { + writer.WritePropertyName("maxParallelTasks"u8); + writer.WriteNumberValue(MaxParallelTasks.Value); + } + if (Optional.IsDefined(DisplayName)) + { + writer.WritePropertyName("displayName"u8); + writer.WriteStringValue(DisplayName); + } + if (Optional.IsDefined(UsesTaskDependencies)) + { + writer.WritePropertyName("usesTaskDependencies"u8); + writer.WriteBooleanValue(UsesTaskDependencies.Value); + } + if (Optional.IsDefined(OnAllTasksComplete)) + { + writer.WritePropertyName("onAllTasksComplete"u8); + writer.WriteStringValue(OnAllTasksComplete.Value.ToString()); + } + if (Optional.IsDefined(OnTaskFailure)) + { + writer.WritePropertyName("onTaskFailure"u8); + writer.WriteStringValue(OnTaskFailure.Value.ToString()); + } + if (Optional.IsDefined(NetworkConfiguration)) + { + writer.WritePropertyName("networkConfiguration"u8); + writer.WriteObjectValue(NetworkConfiguration, options); + } + if (Optional.IsDefined(Constraints)) + { + writer.WritePropertyName("constraints"u8); + writer.WriteObjectValue(Constraints, options); + } + if (Optional.IsDefined(JobManagerTask)) + { + writer.WritePropertyName("jobManagerTask"u8); + writer.WriteObjectValue(JobManagerTask, options); + } + if (Optional.IsDefined(JobPreparationTask)) + { + writer.WritePropertyName("jobPreparationTask"u8); + writer.WriteObjectValue(JobPreparationTask, options); + } + if (Optional.IsDefined(JobReleaseTask)) + { + writer.WritePropertyName("jobReleaseTask"u8); + writer.WriteObjectValue(JobReleaseTask, options); + } + if (Optional.IsCollectionDefined(CommonEnvironmentSettings)) + { + writer.WritePropertyName("commonEnvironmentSettings"u8); + writer.WriteStartArray(); + foreach (var item in CommonEnvironmentSettings) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + writer.WritePropertyName("poolInfo"u8); + writer.WriteObjectValue(PoolInfo, options); + if (Optional.IsCollectionDefined(Metadata)) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartArray(); + foreach (var item in Metadata) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchJobSpecification IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobSpecification)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchJobSpecification(document.RootElement, options); + } + + internal static BatchJobSpecification DeserializeBatchJobSpecification(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + int? priority = default; + bool? allowTaskPreemption = default; + int? maxParallelTasks = default; + string displayName = default; + bool? usesTaskDependencies = default; + OnAllBatchTasksComplete? onAllTasksComplete = default; + OnBatchTaskFailure? onTaskFailure = default; + BatchJobNetworkConfiguration networkConfiguration = default; + BatchJobConstraints constraints = default; + BatchJobManagerTask jobManagerTask = default; + BatchJobPreparationTask jobPreparationTask = default; + BatchJobReleaseTask jobReleaseTask = default; + IList commonEnvironmentSettings = default; + BatchPoolInfo poolInfo = default; + IList metadata = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("priority"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + priority = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("allowTaskPreemption"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + allowTaskPreemption = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("maxParallelTasks"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + maxParallelTasks = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("displayName"u8)) + { + displayName = property.Value.GetString(); + continue; + } + if (property.NameEquals("usesTaskDependencies"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + usesTaskDependencies = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("onAllTasksComplete"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + onAllTasksComplete = new OnAllBatchTasksComplete(property.Value.GetString()); + continue; + } + if (property.NameEquals("onTaskFailure"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + onTaskFailure = new OnBatchTaskFailure(property.Value.GetString()); + continue; + } + if (property.NameEquals("networkConfiguration"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + networkConfiguration = BatchJobNetworkConfiguration.DeserializeBatchJobNetworkConfiguration(property.Value, options); + continue; + } + if (property.NameEquals("constraints"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + constraints = BatchJobConstraints.DeserializeBatchJobConstraints(property.Value, options); + continue; + } + if (property.NameEquals("jobManagerTask"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + jobManagerTask = BatchJobManagerTask.DeserializeBatchJobManagerTask(property.Value, options); + continue; + } + if (property.NameEquals("jobPreparationTask"u8)) + { + if 
(property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + jobPreparationTask = BatchJobPreparationTask.DeserializeBatchJobPreparationTask(property.Value, options); + continue; + } + if (property.NameEquals("jobReleaseTask"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + jobReleaseTask = BatchJobReleaseTask.DeserializeBatchJobReleaseTask(property.Value, options); + continue; + } + if (property.NameEquals("commonEnvironmentSettings"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(EnvironmentSetting.DeserializeEnvironmentSetting(item, options)); + } + commonEnvironmentSettings = array; + continue; + } + if (property.NameEquals("poolInfo"u8)) + { + poolInfo = BatchPoolInfo.DeserializeBatchPoolInfo(property.Value, options); + continue; + } + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(MetadataItem.DeserializeMetadataItem(item, options)); + } + metadata = array; + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchJobSpecification( + priority, + allowTaskPreemption, + maxParallelTasks, + displayName, + usesTaskDependencies, + onAllTasksComplete, + onTaskFailure, + networkConfiguration, + constraints, + jobManagerTask, + jobPreparationTask, + jobReleaseTask, + commonEnvironmentSettings ?? new ChangeTrackingList(), + poolInfo, + metadata ?? new ChangeTrackingList(), + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchJobSpecification)} does not support writing '{options.Format}' format."); + } + } + + BatchJobSpecification IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchJobSpecification(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchJobSpecification)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchJobSpecification FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchJobSpecification(document.RootElement); + } + + /// Convert into a . 
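+ // A hypothetical in-assembly usage sketch (poolInfo stands in for a configured BatchPoolInfo):
+ //   var spec = new BatchJobSpecification(poolInfo) { Priority = 100, DisplayName = "nightly run" };
+ //   RequestContent body = spec.ToRequestContent(); // serialized with the wire ("W") options, as shown below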
+ internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSpecification.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSpecification.cs new file mode 100644 index 0000000000000..692c7b69ba717 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSpecification.cs @@ -0,0 +1,133 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Specifies details of the Jobs to be created on a schedule. + public partial class BatchJobSpecification + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The Pool on which the Batch service runs the Tasks of Jobs created under this schedule. + /// is null. + public BatchJobSpecification(BatchPoolInfo poolInfo) + { + Argument.AssertNotNull(poolInfo, nameof(poolInfo)); + + CommonEnvironmentSettings = new ChangeTrackingList(); + PoolInfo = poolInfo; + Metadata = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// The priority of Jobs created under this schedule. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. This priority is used as the default for all Jobs under the Job Schedule. You can update a Job's priority after it has been created by using the update Job API. + /// Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able to requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. + /// The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. + /// The display name for Jobs created under this schedule. The name need not be unique and can contain any Unicode characters up to a maximum length of 1024. + /// Whether Tasks in the Job can define dependencies on each other. The default is false. + /// The action the Batch service should take when all Tasks in a Job created under this schedule are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete.
This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. + /// The action the Batch service should take when any Task fails in a Job created under this schedule. A Task is considered to have failed if it has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. + /// The network configuration for the Job. + /// The execution constraints for Jobs created under this schedule. + /// The details of a Job Manager Task to be launched when a Job is started under this schedule. If the Job does not specify a Job Manager Task, the user must explicitly add Tasks to the Job using the Task API. If the Job does specify a Job Manager Task, the Batch service creates the Job Manager Task when the Job is created, and will try to schedule the Job Manager Task before scheduling other Tasks in the Job. + /// The Job Preparation Task for Jobs created under this schedule. If a Job has a Job Preparation Task, the Batch service will run the Job Preparation Task on a Node before starting any Tasks of that Job on that Compute Node. + /// The Job Release Task for Jobs created under this schedule. The primary purpose of the Job Release Task is to undo changes to Nodes made by the Job Preparation Task. Example activities include deleting local files, or shutting down services that were started as part of Job preparation. A Job Release Task cannot be specified without also specifying a Job Preparation Task for the Job. The Batch service runs the Job Release Task on the Compute Nodes that have run the Job Preparation Task. + /// A list of common environment variable settings. These environment variables are set for all Tasks in Jobs created under this schedule (including the Job Manager, Job Preparation and Job Release Tasks). Individual Tasks can override an environment setting specified here by specifying the same setting name with a different value. + /// The Pool on which the Batch service runs the Tasks of Jobs created under this schedule. + /// A list of name-value pairs associated with each Job created under this schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. + /// Keeps track of any properties unknown to the library. + internal BatchJobSpecification(int? priority, bool? allowTaskPreemption, int? maxParallelTasks, string displayName, bool? usesTaskDependencies, OnAllBatchTasksComplete? onAllTasksComplete, OnBatchTaskFailure?
onTaskFailure, BatchJobNetworkConfiguration networkConfiguration, BatchJobConstraints constraints, BatchJobManagerTask jobManagerTask, BatchJobPreparationTask jobPreparationTask, BatchJobReleaseTask jobReleaseTask, IList commonEnvironmentSettings, BatchPoolInfo poolInfo, IList metadata, IDictionary serializedAdditionalRawData) + { + Priority = priority; + AllowTaskPreemption = allowTaskPreemption; + MaxParallelTasks = maxParallelTasks; + DisplayName = displayName; + UsesTaskDependencies = usesTaskDependencies; + OnAllTasksComplete = onAllTasksComplete; + OnTaskFailure = onTaskFailure; + NetworkConfiguration = networkConfiguration; + Constraints = constraints; + JobManagerTask = jobManagerTask; + JobPreparationTask = jobPreparationTask; + JobReleaseTask = jobReleaseTask; + CommonEnvironmentSettings = commonEnvironmentSettings; + PoolInfo = poolInfo; + Metadata = metadata; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchJobSpecification() + { + } + + /// The priority of Jobs created under this schedule. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0. This priority is used as the default for all Jobs under the Job Schedule. You can update a Job's priority after it has been created using the update Job API. + public int? Priority { get; set; } + /// Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able to requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. + public bool? AllowTaskPreemption { get; set; } + /// The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. + public int? MaxParallelTasks { get; set; } + /// The display name for Jobs created under this schedule. The name need not be unique and can contain any Unicode characters up to a maximum length of 1024. + public string DisplayName { get; set; } + /// Whether Tasks in the Job can define dependencies on each other. The default is false. + public bool? UsesTaskDependencies { get; set; } + /// The action the Batch service should take when all Tasks in a Job created under this schedule are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. + public OnAllBatchTasksComplete? OnAllTasksComplete { get; set; } + /// The action the Batch service should take when any Task fails in a Job created under this schedule. A Task is considered to have failed if it has a failureInfo.
A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. + public OnBatchTaskFailure? OnTaskFailure { get; set; } + /// The network configuration for the Job. + public BatchJobNetworkConfiguration NetworkConfiguration { get; set; } + /// The execution constraints for Jobs created under this schedule. + public BatchJobConstraints Constraints { get; set; } + /// The details of a Job Manager Task to be launched when a Job is started under this schedule. If the Job does not specify a Job Manager Task, the user must explicitly add Tasks to the Job using the Task API. If the Job does specify a Job Manager Task, the Batch service creates the Job Manager Task when the Job is created, and will try to schedule the Job Manager Task before scheduling other Tasks in the Job. + public BatchJobManagerTask JobManagerTask { get; set; } + /// The Job Preparation Task for Jobs created under this schedule. If a Job has a Job Preparation Task, the Batch service will run the Job Preparation Task on a Node before starting any Tasks of that Job on that Compute Node. + public BatchJobPreparationTask JobPreparationTask { get; set; } + /// The Job Release Task for Jobs created under this schedule. The primary purpose of the Job Release Task is to undo changes to Nodes made by the Job Preparation Task. Example activities include deleting local files, or shutting down services that were started as part of Job preparation. A Job Release Task cannot be specified without also specifying a Job Preparation Task for the Job. The Batch service runs the Job Release Task on the Compute Nodes that have run the Job Preparation Task. + public BatchJobReleaseTask JobReleaseTask { get; set; } + /// A list of common environment variable settings. These environment variables are set for all Tasks in Jobs created under this schedule (including the Job Manager, Job Preparation and Job Release Tasks). Individual Tasks can override an environment setting specified here by specifying the same setting name with a different value. + public IList CommonEnvironmentSettings { get; } + /// The Pool on which the Batch service runs the Tasks of Jobs created under this schedule. + public BatchPoolInfo PoolInfo { get; set; } + /// A list of name-value pairs associated with each Job created under this schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. + public IList Metadata { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobState.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobState.cs new file mode 100644 index 0000000000000..50d7865a9b39c --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobState.cs @@ -0,0 +1,66 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// BatchJobState enums. + public readonly partial struct BatchJobState : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public BatchJobState(string value) + { + _value = value ?? 
throw new ArgumentNullException(nameof(value)); + } + + private const string ActiveValue = "active"; + private const string DisablingValue = "disabling"; + private const string DisabledValue = "disabled"; + private const string EnablingValue = "enabling"; + private const string TerminatingValue = "terminating"; + private const string CompletedValue = "completed"; + private const string DeletingValue = "deleting"; + + /// The Job is available to have Tasks scheduled. + public static BatchJobState Active { get; } = new BatchJobState(ActiveValue); + /// A user has requested that the Job be disabled, but the disable operation is still in progress (for example, waiting for Tasks to terminate). + public static BatchJobState Disabling { get; } = new BatchJobState(DisablingValue); + /// A user has disabled the Job. No Tasks are running, and no new Tasks will be scheduled. + public static BatchJobState Disabled { get; } = new BatchJobState(DisabledValue); + /// A user has requested that the Job be enabled, but the enable operation is still in progress. + public static BatchJobState Enabling { get; } = new BatchJobState(EnablingValue); + /// The Job is about to complete, either because a Job Manager Task has completed or because the user has terminated the Job, but the terminate operation is still in progress (for example, because Job Release Tasks are running). + public static BatchJobState Terminating { get; } = new BatchJobState(TerminatingValue); + /// All Tasks have terminated, and the system will not accept any more Tasks or any further changes to the Job. + public static BatchJobState Completed { get; } = new BatchJobState(CompletedValue); + /// A user has requested that the Job be deleted, but the delete operation is still in progress (for example, because the system is still terminating running Tasks). + public static BatchJobState Deleting { get; } = new BatchJobState(DeletingValue); + /// Determines if two values are the same. + public static bool operator ==(BatchJobState left, BatchJobState right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(BatchJobState left, BatchJobState right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator BatchJobState(string value) => new BatchJobState(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is BatchJobState other && Equals(other); + /// + public bool Equals(BatchJobState other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobStatistics.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobStatistics.Serialization.cs new file mode 100644 index 0000000000000..27a9f31eaf0c0 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobStatistics.Serialization.cs @@ -0,0 +1,254 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
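+
+// Illustrative usage, a minimal sketch rather than part of the generated surface:
+// because this type implements IJsonModel<BatchJobStatistics>, it can be
+// round-tripped through System.ClientModel's ModelReaderWriter helper.
+// "stats" below is an assumed, already-populated BatchJobStatistics instance:
+//
+//     BinaryData json = ModelReaderWriter.Write(stats);
+//     BatchJobStatistics roundTripped = ModelReaderWriter.Read<BatchJobStatistics>(json);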
+ +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchJobStatistics : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobStatistics)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("url"u8); + writer.WriteStringValue(Url); + writer.WritePropertyName("startTime"u8); + writer.WriteStringValue(StartTime, "O"); + writer.WritePropertyName("lastUpdateTime"u8); + writer.WriteStringValue(LastUpdateTime, "O"); + writer.WritePropertyName("userCPUTime"u8); + writer.WriteStringValue(UserCpuTime, "P"); + writer.WritePropertyName("kernelCPUTime"u8); + writer.WriteStringValue(KernelCpuTime, "P"); + writer.WritePropertyName("wallClockTime"u8); + writer.WriteStringValue(WallClockTime, "P"); + writer.WritePropertyName("readIOps"u8); + writer.WriteNumberValue(ReadIOps); + writer.WritePropertyName("writeIOps"u8); + writer.WriteNumberValue(WriteIOps); + writer.WritePropertyName("readIOGiB"u8); + writer.WriteNumberValue(ReadIOGiB); + writer.WritePropertyName("writeIOGiB"u8); + writer.WriteNumberValue(WriteIOGiB); + writer.WritePropertyName("numSucceededTasks"u8); + writer.WriteNumberValue(NumSucceededTasks); + writer.WritePropertyName("numFailedTasks"u8); + writer.WriteNumberValue(NumFailedTasks); + writer.WritePropertyName("numTaskRetries"u8); + writer.WriteNumberValue(NumTaskRetries); + writer.WritePropertyName("waitTime"u8); + writer.WriteStringValue(WaitTime, "P"); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchJobStatistics IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobStatistics)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchJobStatistics(document.RootElement, options); + } + + internal static BatchJobStatistics DeserializeBatchJobStatistics(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string url = default; + DateTimeOffset startTime = default; + DateTimeOffset lastUpdateTime = default; + TimeSpan userCPUTime = default; + TimeSpan kernelCPUTime = default; + TimeSpan wallClockTime = default; + long readIOps = default; + long writeIOps = default; + float readIOGiB = default; + float writeIOGiB = default; + long numSucceededTasks = default; + long numFailedTasks = default; + long numTaskRetries = default; + TimeSpan waitTime = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("url"u8)) + { + url = property.Value.GetString(); + continue; + } + if (property.NameEquals("startTime"u8)) + { + startTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("lastUpdateTime"u8)) + { + lastUpdateTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("userCPUTime"u8)) + { + userCPUTime = property.Value.GetTimeSpan("P"); + continue; + } + if (property.NameEquals("kernelCPUTime"u8)) + { + kernelCPUTime = property.Value.GetTimeSpan("P"); + continue; + } + if (property.NameEquals("wallClockTime"u8)) + { + wallClockTime = property.Value.GetTimeSpan("P"); + continue; + } + if (property.NameEquals("readIOps"u8)) + { + readIOps = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("writeIOps"u8)) + { + writeIOps = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("readIOGiB"u8)) + { + readIOGiB = property.Value.GetSingle(); + continue; + } + if (property.NameEquals("writeIOGiB"u8)) + { + writeIOGiB = property.Value.GetSingle(); + continue; + } + if (property.NameEquals("numSucceededTasks"u8)) + { + numSucceededTasks = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("numFailedTasks"u8)) + { + numFailedTasks = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("numTaskRetries"u8)) + { + numTaskRetries = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("waitTime"u8)) + { + waitTime = property.Value.GetTimeSpan("P"); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchJobStatistics( + url, + startTime, + lastUpdateTime, + userCPUTime, + kernelCPUTime, + wallClockTime, + readIOps, + writeIOps, + readIOGiB, + writeIOGiB, + numSucceededTasks, + numFailedTasks, + numTaskRetries, + waitTime, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchJobStatistics)} does not support writing '{options.Format}' format."); + } + } + + BatchJobStatistics IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchJobStatistics(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchJobStatistics)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchJobStatistics FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchJobStatistics(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobStatistics.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobStatistics.cs new file mode 100644 index 0000000000000..02388898eb670 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobStatistics.cs @@ -0,0 +1,153 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Resource usage statistics for a Job. + public partial class BatchJobStatistics + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The URL of the statistics. + /// The start time of the time range covered by the statistics. + /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. + /// The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in the Job. + /// The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in the Job. + /// The total wall clock time of all Tasks in the Job. 
The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If a Task was retried, this includes the wall clock time of all the Task retries. + /// The total number of disk read operations made by all Tasks in the Job. + /// The total number of disk write operations made by all Tasks in the Job. + /// The total amount of data in GiB read from disk by all Tasks in the Job. + /// The total amount of data in GiB written to disk by all Tasks in the Job. + /// The total number of Tasks successfully completed in the Job during the given time range. A Task completes successfully if it returns exit code 0. + /// The total number of Tasks in the Job that failed during the given time range. A Task fails if it exhausts its maximum retry count without returning exit code 0. + /// The total number of retries on all the Tasks in the Job during the given time range. + /// The total wait time of all Tasks in the Job. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.) This value is only reported in the Account lifetime statistics; it is not included in the Job statistics. + /// is null. + public BatchJobStatistics(string url, DateTimeOffset startTime, DateTimeOffset lastUpdateTime, TimeSpan userCpuTime, TimeSpan kernelCpuTime, TimeSpan wallClockTime, long readIOps, long writeIOps, float readIOGiB, float writeIOGiB, long numSucceededTasks, long numFailedTasks, long numTaskRetries, TimeSpan waitTime) + { + Argument.AssertNotNull(url, nameof(url)); + + Url = url; + StartTime = startTime; + LastUpdateTime = lastUpdateTime; + UserCpuTime = userCpuTime; + KernelCpuTime = kernelCpuTime; + WallClockTime = wallClockTime; + ReadIOps = readIOps; + WriteIOps = writeIOps; + ReadIOGiB = readIOGiB; + WriteIOGiB = writeIOGiB; + NumSucceededTasks = numSucceededTasks; + NumFailedTasks = numFailedTasks; + NumTaskRetries = numTaskRetries; + WaitTime = waitTime; + } + + /// Initializes a new instance of . + /// The URL of the statistics. + /// The start time of the time range covered by the statistics. + /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. + /// The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in the Job. + /// The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in the Job. + /// The total wall clock time of all Tasks in the Job. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If a Task was retried, this includes the wall clock time of all the Task retries. + /// The total number of disk read operations made by all Tasks in the Job. + /// The total number of disk write operations made by all Tasks in the Job. + /// The total amount of data in GiB read from disk by all Tasks in the Job. + /// The total amount of data in GiB written to disk by all Tasks in the Job. + /// The total number of Tasks successfully completed in the Job during the given time range. A Task completes successfully if it returns exit code 0. 
+ /// The total number of Tasks in the Job that failed during the given time range. A Task fails if it exhausts its maximum retry count without returning exit code 0. + /// The total number of retries on all the Tasks in the Job during the given time range. + /// The total wait time of all Tasks in the Job. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.) This value is only reported in the Account lifetime statistics; it is not included in the Job statistics. + /// Keeps track of any properties unknown to the library. + internal BatchJobStatistics(string url, DateTimeOffset startTime, DateTimeOffset lastUpdateTime, TimeSpan userCpuTime, TimeSpan kernelCpuTime, TimeSpan wallClockTime, long readIOps, long writeIOps, float readIOGiB, float writeIOGiB, long numSucceededTasks, long numFailedTasks, long numTaskRetries, TimeSpan waitTime, IDictionary serializedAdditionalRawData) + { + Url = url; + StartTime = startTime; + LastUpdateTime = lastUpdateTime; + UserCpuTime = userCpuTime; + KernelCpuTime = kernelCpuTime; + WallClockTime = wallClockTime; + ReadIOps = readIOps; + WriteIOps = writeIOps; + ReadIOGiB = readIOGiB; + WriteIOGiB = writeIOGiB; + NumSucceededTasks = numSucceededTasks; + NumFailedTasks = numFailedTasks; + NumTaskRetries = numTaskRetries; + WaitTime = waitTime; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchJobStatistics() + { + } + + /// The URL of the statistics. + public string Url { get; set; } + /// The start time of the time range covered by the statistics. + public DateTimeOffset StartTime { get; set; } + /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. + public DateTimeOffset LastUpdateTime { get; set; } + /// The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in the Job. + public TimeSpan UserCpuTime { get; set; } + /// The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in the Job. + public TimeSpan KernelCpuTime { get; set; } + /// The total wall clock time of all Tasks in the Job. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If a Task was retried, this includes the wall clock time of all the Task retries. + public TimeSpan WallClockTime { get; set; } + /// The total number of disk read operations made by all Tasks in the Job. + public long ReadIOps { get; set; } + /// The total number of disk write operations made by all Tasks in the Job. + public long WriteIOps { get; set; } + /// The total amount of data in GiB read from disk by all Tasks in the Job. + public float ReadIOGiB { get; set; } + /// The total amount of data in GiB written to disk by all Tasks in the Job. + public float WriteIOGiB { get; set; } + /// The total number of Tasks successfully completed in the Job during the given time range. A Task completes successfully if it returns exit code 0. + public long NumSucceededTasks { get; set; } + /// The total number of Tasks in the Job that failed during the given time range. A Task fails if it exhausts its maximum retry count without returning exit code 0. 
+ public long NumFailedTasks { get; set; } + /// The total number of retries on all the Tasks in the Job during the given time range. + public long NumTaskRetries { get; set; } + /// The total wait time of all Tasks in the Job. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.) This value is only reported in the Account lifetime statistics; it is not included in the Job statistics. + public TimeSpan WaitTime { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobTerminateContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobTerminateContent.Serialization.cs new file mode 100644 index 0000000000000..72f3d8bd44fa2 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobTerminateContent.Serialization.cs @@ -0,0 +1,138 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchJobTerminateContent : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobTerminateContent)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(TerminationReason)) + { + writer.WritePropertyName("terminateReason"u8); + writer.WriteStringValue(TerminationReason); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchJobTerminateContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobTerminateContent)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchJobTerminateContent(document.RootElement, options); + } + + internal static BatchJobTerminateContent DeserializeBatchJobTerminateContent(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string terminateReason = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("terminateReason"u8)) + { + terminateReason = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchJobTerminateContent(terminateReason, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchJobTerminateContent)} does not support writing '{options.Format}' format."); + } + } + + BatchJobTerminateContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchJobTerminateContent(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchJobTerminateContent)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchJobTerminateContent FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchJobTerminateContent(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobTerminateContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobTerminateContent.cs new file mode 100644 index 0000000000000..537722bd26452 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobTerminateContent.cs @@ -0,0 +1,65 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Parameters for terminating an Azure Batch Job. 
+ public partial class BatchJobTerminateContent + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public BatchJobTerminateContent() + { + } + + /// Initializes a new instance of . + /// The text you want to appear as the Job's TerminationReason. The default is 'UserTerminate'. + /// Keeps track of any properties unknown to the library. + internal BatchJobTerminateContent(string terminationReason, IDictionary serializedAdditionalRawData) + { + TerminationReason = terminationReason; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The text you want to appear as the Job's TerminationReason. The default is 'UserTerminate'. + public string TerminationReason { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobUpdateContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobUpdateContent.Serialization.cs new file mode 100644 index 0000000000000..6f849ee7b5853 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobUpdateContent.Serialization.cs @@ -0,0 +1,250 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchJobUpdateContent : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobUpdateContent)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(Priority)) + { + writer.WritePropertyName("priority"u8); + writer.WriteNumberValue(Priority.Value); + } + if (Optional.IsDefined(AllowTaskPreemption)) + { + writer.WritePropertyName("allowTaskPreemption"u8); + writer.WriteBooleanValue(AllowTaskPreemption.Value); + } + if (Optional.IsDefined(MaxParallelTasks)) + { + writer.WritePropertyName("maxParallelTasks"u8); + writer.WriteNumberValue(MaxParallelTasks.Value); + } + if (Optional.IsDefined(Constraints)) + { + writer.WritePropertyName("constraints"u8); + writer.WriteObjectValue(Constraints, options); + } + if (Optional.IsDefined(PoolInfo)) + { + writer.WritePropertyName("poolInfo"u8); + writer.WriteObjectValue(PoolInfo, options); + } + if (Optional.IsDefined(OnAllTasksComplete)) + { + writer.WritePropertyName("onAllTasksComplete"u8); + writer.WriteStringValue(OnAllTasksComplete.Value.ToString()); + } + if (Optional.IsCollectionDefined(Metadata)) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartArray(); + foreach (var item in Metadata) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchJobUpdateContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchJobUpdateContent)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchJobUpdateContent(document.RootElement, options); + } + + internal static BatchJobUpdateContent DeserializeBatchJobUpdateContent(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + int? priority = default; + bool? allowTaskPreemption = default; + int? maxParallelTasks = default; + BatchJobConstraints constraints = default; + BatchPoolInfo poolInfo = default; + OnAllBatchTasksComplete? 
onAllTasksComplete = default; + IList metadata = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("priority"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + priority = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("allowTaskPreemption"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + allowTaskPreemption = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("maxParallelTasks"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + maxParallelTasks = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("constraints"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + constraints = BatchJobConstraints.DeserializeBatchJobConstraints(property.Value, options); + continue; + } + if (property.NameEquals("poolInfo"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + poolInfo = BatchPoolInfo.DeserializeBatchPoolInfo(property.Value, options); + continue; + } + if (property.NameEquals("onAllTasksComplete"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + onAllTasksComplete = new OnAllBatchTasksComplete(property.Value.GetString()); + continue; + } + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(MetadataItem.DeserializeMetadataItem(item, options)); + } + metadata = array; + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchJobUpdateContent( + priority, + allowTaskPreemption, + maxParallelTasks, + constraints, + poolInfo, + onAllTasksComplete, + metadata ?? new ChangeTrackingList(), + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchJobUpdateContent)} does not support writing '{options.Format}' format."); + } + } + + BatchJobUpdateContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchJobUpdateContent(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchJobUpdateContent)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchJobUpdateContent FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchJobUpdateContent(document.RootElement); + } + + /// Convert into a . 
+ internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobUpdateContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobUpdateContent.cs new file mode 100644 index 0000000000000..5776641d88eaf --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobUpdateContent.cs @@ -0,0 +1,90 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Parameters for updating an Azure Batch Job. + public partial class BatchJobUpdateContent + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public BatchJobUpdateContent() + { + Metadata = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. If omitted, the priority of the Job is left unchanged. + /// Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able to requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. + /// The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. + /// The execution constraints for the Job. If omitted, the existing execution constraints are left unchanged. + /// The Pool on which the Batch service runs the Job's Tasks. You may change the Pool for a Job only when the Job is disabled. The Patch Job call will fail if you include the poolInfo element and the Job is not disabled. If you specify an autoPoolSpecification in the poolInfo, only the keepAlive property of the autoPoolSpecification can be updated, and then only if the autoPoolSpecification has a poolLifetimeOption of Job (other job properties can be updated as normal). If omitted, the Job continues to run on its current Pool. + /// The action the Batch service should take when all Tasks in the Job are in the completed state. If omitted, the completion behavior is left unchanged.
You may not change the value from terminatejob to noaction - that is, once you have engaged automatic Job termination, you cannot turn it off again. If you try to do this, the request fails with an 'invalid property value' error response; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + /// A list of name-value pairs associated with the Job as metadata. If omitted, the existing Job metadata is left unchanged. + /// Keeps track of any properties unknown to the library. + internal BatchJobUpdateContent(int? priority, bool? allowTaskPreemption, int? maxParallelTasks, BatchJobConstraints constraints, BatchPoolInfo poolInfo, OnAllBatchTasksComplete? onAllTasksComplete, IList metadata, IDictionary serializedAdditionalRawData) + { + Priority = priority; + AllowTaskPreemption = allowTaskPreemption; + MaxParallelTasks = maxParallelTasks; + Constraints = constraints; + PoolInfo = poolInfo; + OnAllTasksComplete = onAllTasksComplete; + Metadata = metadata; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. If omitted, the priority of the Job is left unchanged. + public int? Priority { get; set; } + /// Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able to requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. + public bool? AllowTaskPreemption { get; set; } + /// The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. + public int? MaxParallelTasks { get; set; } + /// The execution constraints for the Job. If omitted, the existing execution constraints are left unchanged. + public BatchJobConstraints Constraints { get; set; } + /// The Pool on which the Batch service runs the Job's Tasks. You may change the Pool for a Job only when the Job is disabled. The Patch Job call will fail if you include the poolInfo element and the Job is not disabled. If you specify an autoPoolSpecification in the poolInfo, only the keepAlive property of the autoPoolSpecification can be updated, and then only if the autoPoolSpecification has a poolLifetimeOption of Job (other job properties can be updated as normal). If omitted, the Job continues to run on its current Pool. + public BatchPoolInfo PoolInfo { get; set; } + /// The action the Batch service should take when all Tasks in the Job are in the completed state. If omitted, the completion behavior is left unchanged. You may not change the value from terminatejob to noaction - that is, once you have engaged automatic Job termination, you cannot turn it off again. If you try to do this, the request fails with an 'invalid property value' error response; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + public OnAllBatchTasksComplete? OnAllTasksComplete { get; set; } + /// A list of name-value pairs associated with the Job as metadata. If omitted, the existing Job metadata is left unchanged.
+ public IList Metadata { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNode.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNode.Serialization.cs new file mode 100644 index 0000000000000..f37cee7103fa6 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNode.Serialization.cs @@ -0,0 +1,480 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchNode : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNode)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(Id)) + { + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + } + if (Optional.IsDefined(Url)) + { + writer.WritePropertyName("url"u8); + writer.WriteStringValue(Url); + } + if (Optional.IsDefined(State)) + { + writer.WritePropertyName("state"u8); + writer.WriteStringValue(State.Value.ToString()); + } + if (Optional.IsDefined(SchedulingState)) + { + writer.WritePropertyName("schedulingState"u8); + writer.WriteStringValue(SchedulingState.Value.ToString()); + } + if (Optional.IsDefined(StateTransitionTime)) + { + writer.WritePropertyName("stateTransitionTime"u8); + writer.WriteStringValue(StateTransitionTime.Value, "O"); + } + if (Optional.IsDefined(LastBootTime)) + { + writer.WritePropertyName("lastBootTime"u8); + writer.WriteStringValue(LastBootTime.Value, "O"); + } + if (Optional.IsDefined(AllocationTime)) + { + writer.WritePropertyName("allocationTime"u8); + writer.WriteStringValue(AllocationTime.Value, "O"); + } + if (Optional.IsDefined(IpAddress)) + { + writer.WritePropertyName("ipAddress"u8); + writer.WriteStringValue(IpAddress); + } + if (Optional.IsDefined(AffinityId)) + { + writer.WritePropertyName("affinityId"u8); + writer.WriteStringValue(AffinityId); + } + if (Optional.IsDefined(VmSize)) + { + writer.WritePropertyName("vmSize"u8); + writer.WriteStringValue(VmSize); + } + if (Optional.IsDefined(TotalTasksRun)) + { + writer.WritePropertyName("totalTasksRun"u8); + writer.WriteNumberValue(TotalTasksRun.Value); + } + if (Optional.IsDefined(RunningTasksCount)) + { + writer.WritePropertyName("runningTasksCount"u8); + writer.WriteNumberValue(RunningTasksCount.Value); + } + if (Optional.IsDefined(RunningTaskSlotsCount)) + { + writer.WritePropertyName("runningTaskSlotsCount"u8); + writer.WriteNumberValue(RunningTaskSlotsCount.Value); + } + if (Optional.IsDefined(TotalTasksSucceeded)) + { + writer.WritePropertyName("totalTasksSucceeded"u8); + writer.WriteNumberValue(TotalTasksSucceeded.Value); + } + if (Optional.IsCollectionDefined(RecentTasks)) + { + writer.WritePropertyName("recentTasks"u8); + writer.WriteStartArray(); + foreach (var item in RecentTasks) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (Optional.IsDefined(StartTask)) + { + writer.WritePropertyName("startTask"u8); + 
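+ // Nested models such as StartTask are serialized as a property-name/value pair:
+ // WriteObjectValue delegates to the nested model's own IJsonModel<T> implementation,
+ // passing through the same serialization options.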
writer.WriteObjectValue(StartTask, options); + } + if (Optional.IsDefined(StartTaskInfo)) + { + writer.WritePropertyName("startTaskInfo"u8); + writer.WriteObjectValue(StartTaskInfo, options); + } + if (Optional.IsCollectionDefined(Errors)) + { + writer.WritePropertyName("errors"u8); + writer.WriteStartArray(); + foreach (var item in Errors) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (Optional.IsDefined(IsDedicated)) + { + writer.WritePropertyName("isDedicated"u8); + writer.WriteBooleanValue(IsDedicated.Value); + } + if (Optional.IsDefined(EndpointConfiguration)) + { + writer.WritePropertyName("endpointConfiguration"u8); + writer.WriteObjectValue(EndpointConfiguration, options); + } + if (Optional.IsDefined(NodeAgentInfo)) + { + writer.WritePropertyName("nodeAgentInfo"u8); + writer.WriteObjectValue(NodeAgentInfo, options); + } + if (Optional.IsDefined(VirtualMachineInfo)) + { + writer.WritePropertyName("virtualMachineInfo"u8); + writer.WriteObjectValue(VirtualMachineInfo, options); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchNode IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNode)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchNode(document.RootElement, options); + } + + internal static BatchNode DeserializeBatchNode(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + string url = default; + BatchNodeState? state = default; + SchedulingState? schedulingState = default; + DateTimeOffset? stateTransitionTime = default; + DateTimeOffset? lastBootTime = default; + DateTimeOffset? allocationTime = default; + string ipAddress = default; + string affinityId = default; + string vmSize = default; + int? totalTasksRun = default; + int? runningTasksCount = default; + int? runningTaskSlotsCount = default; + int? totalTasksSucceeded = default; + IReadOnlyList recentTasks = default; + BatchStartTask startTask = default; + BatchStartTaskInfo startTaskInfo = default; + IReadOnlyList errors = default; + bool? 
isDedicated = default; + BatchNodeEndpointConfiguration endpointConfiguration = default; + BatchNodeAgentInfo nodeAgentInfo = default; + VirtualMachineInfo virtualMachineInfo = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("url"u8)) + { + url = property.Value.GetString(); + continue; + } + if (property.NameEquals("state"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + state = new BatchNodeState(property.Value.GetString()); + continue; + } + if (property.NameEquals("schedulingState"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + schedulingState = new SchedulingState(property.Value.GetString()); + continue; + } + if (property.NameEquals("stateTransitionTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + stateTransitionTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("lastBootTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + lastBootTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("allocationTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + allocationTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("ipAddress"u8)) + { + ipAddress = property.Value.GetString(); + continue; + } + if (property.NameEquals("affinityId"u8)) + { + affinityId = property.Value.GetString(); + continue; + } + if (property.NameEquals("vmSize"u8)) + { + vmSize = property.Value.GetString(); + continue; + } + if (property.NameEquals("totalTasksRun"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + totalTasksRun = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("runningTasksCount"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + runningTasksCount = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("runningTaskSlotsCount"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + runningTaskSlotsCount = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("totalTasksSucceeded"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + totalTasksSucceeded = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("recentTasks"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(BatchTaskInfo.DeserializeBatchTaskInfo(item, options)); + } + recentTasks = array; + continue; + } + if (property.NameEquals("startTask"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + startTask = BatchStartTask.DeserializeBatchStartTask(property.Value, options); + continue; + } + if (property.NameEquals("startTaskInfo"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + startTaskInfo = BatchStartTaskInfo.DeserializeBatchStartTaskInfo(property.Value, options); + continue; + } + if (property.NameEquals("errors"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in 
property.Value.EnumerateArray()) + { + array.Add(BatchNodeError.DeserializeBatchNodeError(item, options)); + } + errors = array; + continue; + } + if (property.NameEquals("isDedicated"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + isDedicated = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("endpointConfiguration"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + endpointConfiguration = BatchNodeEndpointConfiguration.DeserializeBatchNodeEndpointConfiguration(property.Value, options); + continue; + } + if (property.NameEquals("nodeAgentInfo"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + nodeAgentInfo = BatchNodeAgentInfo.DeserializeBatchNodeAgentInfo(property.Value, options); + continue; + } + if (property.NameEquals("virtualMachineInfo"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + virtualMachineInfo = VirtualMachineInfo.DeserializeVirtualMachineInfo(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchNode( + id, + url, + state, + schedulingState, + stateTransitionTime, + lastBootTime, + allocationTime, + ipAddress, + affinityId, + vmSize, + totalTasksRun, + runningTasksCount, + runningTaskSlotsCount, + totalTasksSucceeded, + recentTasks ?? new ChangeTrackingList(), + startTask, + startTaskInfo, + errors ?? new ChangeTrackingList(), + isDedicated, + endpointConfiguration, + nodeAgentInfo, + virtualMachineInfo, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchNode)} does not support writing '{options.Format}' format."); + } + } + + BatchNode IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchNode(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchNode)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchNode FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchNode(document.RootElement); + } + + /// Convert into a . 
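The IJsonModel/IPersistableModel members above are what let BatchNode round-trip through System.ClientModel's ModelReaderWriter. A minimal sketch of that round trip, assuming a hand-written payload in the wire shape (the property values are illustrative, not output from a real pool):

using System;
using System.ClientModel.Primitives;
using Azure.Compute.Batch;

// Wire-shaped JSON; "J" is the default format ModelReaderWriter uses,
// which routes through the IJsonModel<BatchNode> implementation above.
BinaryData json = BinaryData.FromString(
    "{\"id\":\"tvmps_0001\",\"state\":\"idle\",\"isDedicated\":true,\"totalTasksRun\":3}");

BatchNode node = ModelReaderWriter.Read<BatchNode>(json);
Console.WriteLine($"{node.Id}: {node.State}, dedicated={node.IsDedicated}");

// Writing goes back through IPersistableModel<BatchNode>.Write.
BinaryData roundTripped = ModelReaderWriter.Write(node);
Console.WriteLine(roundTripped.ToString());

Because the model's constructors are internal, ModelReaderWriter (or the client's FromResponse path below) is how external code materializes a BatchNode from JSON.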
+ internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNode.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNode.cs new file mode 100644 index 0000000000000..a1e4ac04664ee --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNode.cs @@ -0,0 +1,151 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// A Compute Node in the Batch service. + public partial class BatchNode + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + internal BatchNode() + { + RecentTasks = new ChangeTrackingList(); + Errors = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// The ID of the Compute Node. Every Compute Node that is added to a Pool is assigned a unique ID. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the ID is reclaimed and could be reused for new Compute Nodes. + /// The URL of the Compute Node. + /// The current state of the Compute Node. The Spot/Low-priority Compute Node has been preempted. Tasks which were running on the Compute Node when it was preempted will be rescheduled when another Compute Node becomes available. + /// Whether the Compute Node is available for Task scheduling. + /// The time at which the Compute Node entered its current state. + /// The last time at which the Compute Node was started. This property may not be present if the Compute Node state is unusable. + /// The time at which this Compute Node was allocated to the Pool. This is the time when the Compute Node was initially allocated and doesn't change once set. It is not updated when the Compute Node is service healed or preempted. + /// The IP address that other Nodes can use to communicate with this Compute Node. Every Compute Node that is added to a Pool is assigned a unique IP address. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the IP address is reclaimed and could be reused for new Compute Nodes. + /// An identifier which can be passed when adding a Task to request that the Task be scheduled on this Compute Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere. + /// The size of the virtual machine hosting the Compute Node. 
For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + /// The total number of Job Tasks completed on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. + /// The total number of currently running Job Tasks on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. + /// The total number of scheduling slots used by currently running Job Tasks on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. + /// The total number of Job Tasks which completed successfully (with exitCode 0) on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. + /// A list of Tasks whose state has recently changed. This property is present only if at least one Task has run on this Compute Node since it was assigned to the Pool. + /// The Task specified to run on the Compute Node as it joins the Pool. + /// Runtime information about the execution of the StartTask on the Compute Node. + /// The list of errors that are currently being encountered by the Compute Node. + /// Whether this Compute Node is a dedicated Compute Node. If false, the Compute Node is a Spot/Low-priority Compute Node. + /// The endpoint configuration for the Compute Node. + /// Information about the Compute Node agent version and the time the Compute Node upgraded to a new version. + /// Info about the current state of the virtual machine. + /// Keeps track of any properties unknown to the library. + internal BatchNode(string id, string url, BatchNodeState? state, SchedulingState? schedulingState, DateTimeOffset? stateTransitionTime, DateTimeOffset? lastBootTime, DateTimeOffset? allocationTime, string ipAddress, string affinityId, string vmSize, int? totalTasksRun, int? runningTasksCount, int? runningTaskSlotsCount, int? totalTasksSucceeded, IReadOnlyList recentTasks, BatchStartTask startTask, BatchStartTaskInfo startTaskInfo, IReadOnlyList errors, bool? isDedicated, BatchNodeEndpointConfiguration endpointConfiguration, BatchNodeAgentInfo nodeAgentInfo, VirtualMachineInfo virtualMachineInfo, IDictionary serializedAdditionalRawData) + { + Id = id; + Url = url; + State = state; + SchedulingState = schedulingState; + StateTransitionTime = stateTransitionTime; + LastBootTime = lastBootTime; + AllocationTime = allocationTime; + IpAddress = ipAddress; + AffinityId = affinityId; + VmSize = vmSize; + TotalTasksRun = totalTasksRun; + RunningTasksCount = runningTasksCount; + RunningTaskSlotsCount = runningTaskSlotsCount; + TotalTasksSucceeded = totalTasksSucceeded; + RecentTasks = recentTasks; + StartTask = startTask; + StartTaskInfo = startTaskInfo; + Errors = errors; + IsDedicated = isDedicated; + EndpointConfiguration = endpointConfiguration; + NodeAgentInfo = nodeAgentInfo; + VirtualMachineInfo = virtualMachineInfo; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The ID of the Compute Node. Every Compute Node that is added to a Pool is assigned a unique ID. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the ID is reclaimed and could be reused for new Compute Nodes. + public string Id { get; } + /// The URL of the Compute Node. 
+ public string Url { get; } + /// The current state of the Compute Node. The Spot/Low-priority Compute Node has been preempted. Tasks which were running on the Compute Node when it was preempted will be rescheduled when another Compute Node becomes available. + public BatchNodeState? State { get; } + /// Whether the Compute Node is available for Task scheduling. + public SchedulingState? SchedulingState { get; } + /// The time at which the Compute Node entered its current state. + public DateTimeOffset? StateTransitionTime { get; } + /// The last time at which the Compute Node was started. This property may not be present if the Compute Node state is unusable. + public DateTimeOffset? LastBootTime { get; } + /// The time at which this Compute Node was allocated to the Pool. This is the time when the Compute Node was initially allocated and doesn't change once set. It is not updated when the Compute Node is service healed or preempted. + public DateTimeOffset? AllocationTime { get; } + /// The IP address that other Nodes can use to communicate with this Compute Node. Every Compute Node that is added to a Pool is assigned a unique IP address. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the IP address is reclaimed and could be reused for new Compute Nodes. + public string IpAddress { get; } + /// An identifier which can be passed when adding a Task to request that the Task be scheduled on this Compute Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere. + public string AffinityId { get; } + /// The size of the virtual machine hosting the Compute Node. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + public string VmSize { get; } + /// The total number of Job Tasks completed on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. + public int? TotalTasksRun { get; } + /// The total number of currently running Job Tasks on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. + public int? RunningTasksCount { get; } + /// The total number of scheduling slots used by currently running Job Tasks on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. + public int? RunningTaskSlotsCount { get; } + /// The total number of Job Tasks which completed successfully (with exitCode 0) on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. + public int? TotalTasksSucceeded { get; } + /// A list of Tasks whose state has recently changed. This property is present only if at least one Task has run on this Compute Node since it was assigned to the Pool. + public IReadOnlyList RecentTasks { get; } + /// The Task specified to run on the Compute Node as it joins the Pool. + public BatchStartTask StartTask { get; } + /// Runtime information about the execution of the StartTask on the Compute Node. + public BatchStartTaskInfo StartTaskInfo { get; } + /// The list of errors that are currently being encountered by the Compute Node. + public IReadOnlyList Errors { get; } + /// Whether this Compute Node is a dedicated Compute Node. 
If false, the Compute Node is a Spot/Low-priority Compute Node. + public bool? IsDedicated { get; } + /// The endpoint configuration for the Compute Node. + public BatchNodeEndpointConfiguration EndpointConfiguration { get; } + /// Information about the Compute Node agent version and the time the Compute Node upgraded to a new version. + public BatchNodeAgentInfo NodeAgentInfo { get; } + /// Info about the current state of the virtual machine. + public VirtualMachineInfo VirtualMachineInfo { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeAgentInfo.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeAgentInfo.Serialization.cs new file mode 100644 index 0000000000000..7ba74ff308f94 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeAgentInfo.Serialization.cs @@ -0,0 +1,143 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchNodeAgentInfo : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNodeAgentInfo)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("version"u8); + writer.WriteStringValue(Version); + writer.WritePropertyName("lastUpdateTime"u8); + writer.WriteStringValue(LastUpdateTime, "O"); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchNodeAgentInfo IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNodeAgentInfo)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchNodeAgentInfo(document.RootElement, options); + } + + internal static BatchNodeAgentInfo DeserializeBatchNodeAgentInfo(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string version = default; + DateTimeOffset lastUpdateTime = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("version"u8)) + { + version = property.Value.GetString(); + continue; + } + if (property.NameEquals("lastUpdateTime"u8)) + { + lastUpdateTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchNodeAgentInfo(version, lastUpdateTime, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchNodeAgentInfo)} does not support writing '{options.Format}' format."); + } + } + + BatchNodeAgentInfo IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchNodeAgentInfo(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchNodeAgentInfo)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchNodeAgentInfo FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchNodeAgentInfo(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeAgentInfo.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeAgentInfo.cs new file mode 100644 index 0000000000000..bcb1536e4afc9 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeAgentInfo.cs @@ -0,0 +1,84 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.Collections.Generic;
+
+namespace Azure.Compute.Batch
+{
+    /// <summary>
+    /// The Batch Compute Node agent is a program that runs on each Compute Node in the
+    /// Pool and provides Batch capability on the Compute Node.
+    /// </summary>
+    public partial class BatchNodeAgentInfo
+    {
+        /// <summary>
+        /// Keeps track of any properties unknown to the library.
+        /// <para>
+        /// To assign an object to the value of this property use <see cref="BinaryData.FromObjectAsJson{T}(T, System.Text.Json.JsonSerializerOptions?)"/>.
+        /// </para>
+        /// <para>
+        /// To assign an already formatted json string to this property use <see cref="BinaryData.FromString(string)"/>.
+        /// </para>
+        /// <para>
+        /// Examples:
+        /// <list type="bullet">
+        /// <item>
+        /// <term>BinaryData.FromObjectAsJson("foo")</term>
+        /// <description>Creates a payload of "foo".</description>
+        /// </item>
+        /// <item>
+        /// <term>BinaryData.FromString("\"foo\"")</term>
+        /// <description>Creates a payload of "foo".</description>
+        /// </item>
+        /// <item>
+        /// <term>BinaryData.FromObjectAsJson(new { key = "value" })</term>
+        /// <description>Creates a payload of { "key": "value" }.</description>
+        /// </item>
+        /// <item>
+        /// <term>BinaryData.FromString("{\"key\": \"value\"}")</term>
+        /// <description>Creates a payload of { "key": "value" }.</description>
+        /// </item>
+        /// </list>
+        /// </para>
+        /// </summary>
+        private IDictionary<string, BinaryData> _serializedAdditionalRawData;
+
+        /// <summary> Initializes a new instance of <see cref="BatchNodeAgentInfo"/>. </summary>
+        /// <param name="version"> The version of the Batch Compute Node agent running on the Compute Node. This version number can be checked against the Compute Node agent release notes located at https://github.com/Azure/Batch/blob/master/changelogs/nodeagent/CHANGELOG.md. </param>
+        /// <param name="lastUpdateTime"> The time when the Compute Node agent was updated on the Compute Node. This is the most recent time that the Compute Node agent was updated to a new version. </param>
+        /// <exception cref="ArgumentNullException"> <paramref name="version"/> is null. </exception>
+        internal BatchNodeAgentInfo(string version, DateTimeOffset lastUpdateTime)
+        {
+            Argument.AssertNotNull(version, nameof(version));
+
+            Version = version;
+            LastUpdateTime = lastUpdateTime;
+        }
+
+        /// <summary> Initializes a new instance of <see cref="BatchNodeAgentInfo"/>. </summary>
+        /// <param name="version"> The version of the Batch Compute Node agent running on the Compute Node. This version number can be checked against the Compute Node agent release notes located at https://github.com/Azure/Batch/blob/master/changelogs/nodeagent/CHANGELOG.md. </param>
+        /// <param name="lastUpdateTime"> The time when the Compute Node agent was updated on the Compute Node. This is the most recent time that the Compute Node agent was updated to a new version. </param>
+        /// <param name="serializedAdditionalRawData"> Keeps track of any properties unknown to the library. </param>
+        internal BatchNodeAgentInfo(string version, DateTimeOffset lastUpdateTime, IDictionary<string, BinaryData> serializedAdditionalRawData)
+        {
+            Version = version;
+            LastUpdateTime = lastUpdateTime;
+            _serializedAdditionalRawData = serializedAdditionalRawData;
+        }
+
+        /// <summary> Initializes a new instance of <see cref="BatchNodeAgentInfo"/> for deserialization. </summary>
+        internal BatchNodeAgentInfo()
+        {
+        }
+
+        /// <summary> The version of the Batch Compute Node agent running on the Compute Node. This version number can be checked against the Compute Node agent release notes located at https://github.com/Azure/Batch/blob/master/changelogs/nodeagent/CHANGELOG.md. </summary>
+        public string Version { get; }
+        /// <summary> The time when the Compute Node agent was updated on the Compute Node. This is the most recent time that the Compute Node agent was updated to a new version. </summary>
+        public DateTimeOffset LastUpdateTime { get; }
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeCommunicationMode.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeCommunicationMode.cs
new file mode 100644
index 0000000000000..099e16af3f208
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeCommunicationMode.cs
@@ -0,0 +1,54 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
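One detail worth noticing in the BatchNodeAgentInfo pair above: every JSON property the deserializer does not recognize is stashed in _serializedAdditionalRawData and written back out, so payloads survive a read/write cycle even when the service adds fields this library version predates. A small sketch; the "newFeature" field is hypothetical and stands in for any such unknown property:

using System;
using System.ClientModel.Primitives;
using Azure.Compute.Batch;

// "newFeature" is not a property of BatchNodeAgentInfo; it is captured
// into the raw-data dictionary during Read and re-emitted during Write.
BinaryData json = BinaryData.FromString(
    "{\"version\":\"1.2.3\",\"lastUpdateTime\":\"2024-06-01T00:00:00Z\",\"newFeature\":42}");

BatchNodeAgentInfo info = ModelReaderWriter.Read<BatchNodeAgentInfo>(json);
Console.WriteLine(info.Version);                              // 1.2.3

// The unknown field is still present in the serialized output.
Console.WriteLine(ModelReaderWriter.Write(info).ToString());

Note the options.Format != "W" guards: the passthrough applies to the "J" format (the ModelReaderWriter default), while the wire path skips it.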
+ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// BatchNodeCommunicationMode enums. + public readonly partial struct BatchNodeCommunicationMode : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public BatchNodeCommunicationMode(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string DefaultValue = "default"; + private const string ClassicValue = "classic"; + private const string SimplifiedValue = "simplified"; + + /// The node communication mode is automatically set by the Batch service. + public static BatchNodeCommunicationMode Default { get; } = new BatchNodeCommunicationMode(DefaultValue); + /// Nodes using the classic communication mode require inbound TCP communication on ports 29876 and 29877 from the "BatchNodeManagement.{region}" service tag and outbound TCP communication on port 443 to the "Storage.region" and "BatchNodeManagement.{region}" service tags. + public static BatchNodeCommunicationMode Classic { get; } = new BatchNodeCommunicationMode(ClassicValue); + /// Nodes using the simplified communication mode require outbound TCP communication on port 443 to the "BatchNodeManagement.{region}" service tag. No open inbound ports are required. + public static BatchNodeCommunicationMode Simplified { get; } = new BatchNodeCommunicationMode(SimplifiedValue); + /// Determines if two values are the same. + public static bool operator ==(BatchNodeCommunicationMode left, BatchNodeCommunicationMode right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(BatchNodeCommunicationMode left, BatchNodeCommunicationMode right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator BatchNodeCommunicationMode(string value) => new BatchNodeCommunicationMode(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is BatchNodeCommunicationMode other && Equals(other); + /// + public bool Equals(BatchNodeCommunicationMode other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeCounts.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeCounts.Serialization.cs new file mode 100644 index 0000000000000..97f6bfb824bfe --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeCounts.Serialization.cs @@ -0,0 +1,263 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchNodeCounts : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNodeCounts)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("creating"u8); + writer.WriteNumberValue(Creating); + writer.WritePropertyName("idle"u8); + writer.WriteNumberValue(Idle); + writer.WritePropertyName("offline"u8); + writer.WriteNumberValue(Offline); + writer.WritePropertyName("preempted"u8); + writer.WriteNumberValue(Preempted); + writer.WritePropertyName("rebooting"u8); + writer.WriteNumberValue(Rebooting); + writer.WritePropertyName("reimaging"u8); + writer.WriteNumberValue(Reimaging); + writer.WritePropertyName("running"u8); + writer.WriteNumberValue(Running); + writer.WritePropertyName("starting"u8); + writer.WriteNumberValue(Starting); + writer.WritePropertyName("startTaskFailed"u8); + writer.WriteNumberValue(StartTaskFailed); + writer.WritePropertyName("leavingPool"u8); + writer.WriteNumberValue(LeavingPool); + writer.WritePropertyName("unknown"u8); + writer.WriteNumberValue(Unknown); + writer.WritePropertyName("unusable"u8); + writer.WriteNumberValue(Unusable); + writer.WritePropertyName("waitingForStartTask"u8); + writer.WriteNumberValue(WaitingForStartTask); + writer.WritePropertyName("total"u8); + writer.WriteNumberValue(Total); + writer.WritePropertyName("upgradingOS"u8); + writer.WriteNumberValue(UpgradingOs); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchNodeCounts IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNodeCounts)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchNodeCounts(document.RootElement, options); + } + + internal static BatchNodeCounts DeserializeBatchNodeCounts(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + int creating = default; + int idle = default; + int offline = default; + int preempted = default; + int rebooting = default; + int reimaging = default; + int running = default; + int starting = default; + int startTaskFailed = default; + int leavingPool = default; + int unknown = default; + int unusable = default; + int waitingForStartTask = default; + int total = default; + int upgradingOS = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("creating"u8)) + { + creating = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("idle"u8)) + { + idle = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("offline"u8)) + { + offline = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("preempted"u8)) + { + preempted = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("rebooting"u8)) + { + rebooting = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("reimaging"u8)) + { + reimaging = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("running"u8)) + { + running = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("starting"u8)) + { + starting = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("startTaskFailed"u8)) + { + startTaskFailed = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("leavingPool"u8)) + { + leavingPool = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("unknown"u8)) + { + unknown = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("unusable"u8)) + { + unusable = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("waitingForStartTask"u8)) + { + waitingForStartTask = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("total"u8)) + { + total = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("upgradingOS"u8)) + { + upgradingOS = property.Value.GetInt32(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchNodeCounts( + creating, + idle, + offline, + preempted, + rebooting, + reimaging, + running, + starting, + startTaskFailed, + leavingPool, + unknown, + unusable, + waitingForStartTask, + total, + upgradingOS, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchNodeCounts)} does not support writing '{options.Format}' format."); + } + } + + BatchNodeCounts IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchNodeCounts(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchNodeCounts)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchNodeCounts FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchNodeCounts(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeCounts.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeCounts.cs new file mode 100644 index 0000000000000..56ccd9fa23738 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeCounts.cs @@ -0,0 +1,156 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// The number of Compute Nodes in each Compute Node state. + public partial class BatchNodeCounts + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The number of Compute Nodes in the creating state. + /// The number of Compute Nodes in the idle state. + /// The number of Compute Nodes in the offline state. + /// The number of Compute Nodes in the preempted state. + /// The count of Compute Nodes in the rebooting state. + /// The number of Compute Nodes in the reimaging state. + /// The number of Compute Nodes in the running state. + /// The number of Compute Nodes in the starting state. + /// The number of Compute Nodes in the startTaskFailed state. + /// The number of Compute Nodes in the leavingPool state. + /// The number of Compute Nodes in the unknown state. + /// The number of Compute Nodes in the unusable state. 
+ /// The number of Compute Nodes in the waitingForStartTask state. + /// The total number of Compute Nodes. + /// The number of Compute Nodes in the upgradingOS state. + internal BatchNodeCounts(int creating, int idle, int offline, int preempted, int rebooting, int reimaging, int running, int starting, int startTaskFailed, int leavingPool, int unknown, int unusable, int waitingForStartTask, int total, int upgradingOs) + { + Creating = creating; + Idle = idle; + Offline = offline; + Preempted = preempted; + Rebooting = rebooting; + Reimaging = reimaging; + Running = running; + Starting = starting; + StartTaskFailed = startTaskFailed; + LeavingPool = leavingPool; + Unknown = unknown; + Unusable = unusable; + WaitingForStartTask = waitingForStartTask; + Total = total; + UpgradingOs = upgradingOs; + } + + /// Initializes a new instance of . + /// The number of Compute Nodes in the creating state. + /// The number of Compute Nodes in the idle state. + /// The number of Compute Nodes in the offline state. + /// The number of Compute Nodes in the preempted state. + /// The count of Compute Nodes in the rebooting state. + /// The number of Compute Nodes in the reimaging state. + /// The number of Compute Nodes in the running state. + /// The number of Compute Nodes in the starting state. + /// The number of Compute Nodes in the startTaskFailed state. + /// The number of Compute Nodes in the leavingPool state. + /// The number of Compute Nodes in the unknown state. + /// The number of Compute Nodes in the unusable state. + /// The number of Compute Nodes in the waitingForStartTask state. + /// The total number of Compute Nodes. + /// The number of Compute Nodes in the upgradingOS state. + /// Keeps track of any properties unknown to the library. + internal BatchNodeCounts(int creating, int idle, int offline, int preempted, int rebooting, int reimaging, int running, int starting, int startTaskFailed, int leavingPool, int unknown, int unusable, int waitingForStartTask, int total, int upgradingOs, IDictionary serializedAdditionalRawData) + { + Creating = creating; + Idle = idle; + Offline = offline; + Preempted = preempted; + Rebooting = rebooting; + Reimaging = reimaging; + Running = running; + Starting = starting; + StartTaskFailed = startTaskFailed; + LeavingPool = leavingPool; + Unknown = unknown; + Unusable = unusable; + WaitingForStartTask = waitingForStartTask; + Total = total; + UpgradingOs = upgradingOs; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchNodeCounts() + { + } + + /// The number of Compute Nodes in the creating state. + public int Creating { get; } + /// The number of Compute Nodes in the idle state. + public int Idle { get; } + /// The number of Compute Nodes in the offline state. + public int Offline { get; } + /// The number of Compute Nodes in the preempted state. + public int Preempted { get; } + /// The count of Compute Nodes in the rebooting state. + public int Rebooting { get; } + /// The number of Compute Nodes in the reimaging state. + public int Reimaging { get; } + /// The number of Compute Nodes in the running state. + public int Running { get; } + /// The number of Compute Nodes in the starting state. + public int Starting { get; } + /// The number of Compute Nodes in the startTaskFailed state. + public int StartTaskFailed { get; } + /// The number of Compute Nodes in the leavingPool state. 
+ public int LeavingPool { get; } + /// The number of Compute Nodes in the unknown state. + public int Unknown { get; } + /// The number of Compute Nodes in the unusable state. + public int Unusable { get; } + /// The number of Compute Nodes in the waitingForStartTask state. + public int WaitingForStartTask { get; } + /// The total number of Compute Nodes. + public int Total { get; } + /// The number of Compute Nodes in the upgradingOS state. + public int UpgradingOs { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocationOption.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocationOption.cs new file mode 100644 index 0000000000000..c7a053de6dca5 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocationOption.cs @@ -0,0 +1,57 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// BatchNodeDeallocationOption enums. + public readonly partial struct BatchNodeDeallocationOption : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public BatchNodeDeallocationOption(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string RequeueValue = "requeue"; + private const string TerminateValue = "terminate"; + private const string TaskCompletionValue = "taskcompletion"; + private const string RetainedDataValue = "retaineddata"; + + /// Terminate running Task processes and requeue the Tasks. The Tasks will run again when a Compute Node is available. Remove Compute Nodes as soon as Tasks have been terminated. + public static BatchNodeDeallocationOption Requeue { get; } = new BatchNodeDeallocationOption(RequeueValue); + /// Terminate running Tasks. The Tasks will be completed with failureInfo indicating that they were terminated, and will not run again. Remove Compute Nodes as soon as Tasks have been terminated. + public static BatchNodeDeallocationOption Terminate { get; } = new BatchNodeDeallocationOption(TerminateValue); + /// Allow currently running Tasks to complete. Schedule no new Tasks while waiting. Remove Compute Nodes when all Tasks have completed. + public static BatchNodeDeallocationOption TaskCompletion { get; } = new BatchNodeDeallocationOption(TaskCompletionValue); + /// Allow currently running Tasks to complete, then wait for all Task data retention periods to expire. Schedule no new Tasks while waiting. Remove Compute Nodes when all Task retention periods have expired. + public static BatchNodeDeallocationOption RetainedData { get; } = new BatchNodeDeallocationOption(RetainedDataValue); + /// Determines if two values are the same. + public static bool operator ==(BatchNodeDeallocationOption left, BatchNodeDeallocationOption right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(BatchNodeDeallocationOption left, BatchNodeDeallocationOption right) => !left.Equals(right); + /// Converts a string to a . 
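BatchNodeDeallocationOption, like BatchNodeCommunicationMode and the other readonly structs in this diff, follows the SDK's "extensible enum" pattern: any string converts implicitly, comparison is case-insensitive, and values this library does not know yet are carried through rather than rejected. A quick sketch of those semantics:

using System;
using Azure.Compute.Batch;

// Implicit conversion from string, with case-insensitive equality
// (string.Equals with InvariantCultureIgnoreCase, as defined above).
BatchNodeDeallocationOption option = "Requeue";
Console.WriteLine(option == BatchNodeDeallocationOption.Requeue);   // True

// A value added to the service after this library shipped still works;
// it simply round-trips as its string form. "somefutureoption" is made up.
BatchNodeDeallocationOption future = "somefutureoption";
Console.WriteLine(future);                                          // somefutureoption

This is why these types are structs wrapping a string rather than C# enums: an older client never throws on a newer service value.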
+ public static implicit operator BatchNodeDeallocationOption(string value) => new BatchNodeDeallocationOption(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is BatchNodeDeallocationOption other && Equals(other); + /// + public bool Equals(BatchNodeDeallocationOption other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDisableSchedulingContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDisableSchedulingContent.Serialization.cs new file mode 100644 index 0000000000000..ef8420b187398 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDisableSchedulingContent.Serialization.cs @@ -0,0 +1,142 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchNodeDisableSchedulingContent : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNodeDisableSchedulingContent)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(NodeDisableSchedulingOption)) + { + writer.WritePropertyName("nodeDisableSchedulingOption"u8); + writer.WriteStringValue(NodeDisableSchedulingOption.Value.ToString()); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchNodeDisableSchedulingContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNodeDisableSchedulingContent)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchNodeDisableSchedulingContent(document.RootElement, options); + } + + internal static BatchNodeDisableSchedulingContent DeserializeBatchNodeDisableSchedulingContent(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + BatchNodeDisableSchedulingOption? 
nodeDisableSchedulingOption = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("nodeDisableSchedulingOption"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + nodeDisableSchedulingOption = new BatchNodeDisableSchedulingOption(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchNodeDisableSchedulingContent(nodeDisableSchedulingOption, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchNodeDisableSchedulingContent)} does not support writing '{options.Format}' format."); + } + } + + BatchNodeDisableSchedulingContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchNodeDisableSchedulingContent(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchNodeDisableSchedulingContent)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchNodeDisableSchedulingContent FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchNodeDisableSchedulingContent(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDisableSchedulingContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDisableSchedulingContent.cs new file mode 100644 index 0000000000000..bfbc6ff802903 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDisableSchedulingContent.cs @@ -0,0 +1,65 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Parameters for disabling scheduling on an Azure Batch Compute Node. + public partial class BatchNodeDisableSchedulingContent + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". 
+ /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public BatchNodeDisableSchedulingContent() + { + } + + /// Initializes a new instance of . + /// What to do with currently running Tasks when disabling Task scheduling on the Compute Node. The default value is requeue. + /// Keeps track of any properties unknown to the library. + internal BatchNodeDisableSchedulingContent(BatchNodeDisableSchedulingOption? nodeDisableSchedulingOption, IDictionary serializedAdditionalRawData) + { + NodeDisableSchedulingOption = nodeDisableSchedulingOption; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// What to do with currently running Tasks when disabling Task scheduling on the Compute Node. The default value is requeue. + public BatchNodeDisableSchedulingOption? NodeDisableSchedulingOption { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDisableSchedulingOption.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDisableSchedulingOption.cs new file mode 100644 index 0000000000000..65eed252c0567 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDisableSchedulingOption.cs @@ -0,0 +1,54 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// BatchNodeDisableSchedulingOption enums. + public readonly partial struct BatchNodeDisableSchedulingOption : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public BatchNodeDisableSchedulingOption(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string RequeueValue = "requeue"; + private const string TerminateValue = "terminate"; + private const string TaskCompletionValue = "taskcompletion"; + + /// Terminate running Task processes and requeue the Tasks. The Tasks may run again on other Compute Nodes, or when Task scheduling is re-enabled on this Compute Node. Enter offline state as soon as Tasks have been terminated. + public static BatchNodeDisableSchedulingOption Requeue { get; } = new BatchNodeDisableSchedulingOption(RequeueValue); + /// Terminate running Tasks. The Tasks will be completed with failureInfo indicating that they were terminated, and will not run again. Enter offline state as soon as Tasks have been terminated. + public static BatchNodeDisableSchedulingOption Terminate { get; } = new BatchNodeDisableSchedulingOption(TerminateValue); + /// Allow currently running Tasks to complete. Schedule no new Tasks while waiting. Enter offline state when all Tasks have completed. + public static BatchNodeDisableSchedulingOption TaskCompletion { get; } = new BatchNodeDisableSchedulingOption(TaskCompletionValue); + /// Determines if two values are the same. + public static bool operator ==(BatchNodeDisableSchedulingOption left, BatchNodeDisableSchedulingOption right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(BatchNodeDisableSchedulingOption left, BatchNodeDisableSchedulingOption right) => !left.Equals(right); + /// Converts a string to a . 
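Unlike the read-only models earlier in this diff, BatchNodeDisableSchedulingContent is request input, so it gets a public constructor and a settable property. A usage sketch showing the payload it produces (serializing by hand here only to show the shape; in practice the client sends it as the request body):

using System;
using System.ClientModel.Primitives;
using Azure.Compute.Batch;

// Let running tasks finish before the node stops scheduling.
var content = new BatchNodeDisableSchedulingContent
{
    NodeDisableSchedulingOption = BatchNodeDisableSchedulingOption.TaskCompletion
};

// Produces {"nodeDisableSchedulingOption":"taskcompletion"} via the
// IJsonModel<BatchNodeDisableSchedulingContent>.Write implementation above.
Console.WriteLine(ModelReaderWriter.Write(content).ToString());

Because the option is guarded by Optional.IsDefined in the serializer, an empty content object serializes to {} and the service applies the documented default, requeue.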
+ public static implicit operator BatchNodeDisableSchedulingOption(string value) => new BatchNodeDisableSchedulingOption(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is BatchNodeDisableSchedulingOption other && Equals(other); + /// + public bool Equals(BatchNodeDisableSchedulingOption other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeEndpointConfiguration.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeEndpointConfiguration.Serialization.cs new file mode 100644 index 0000000000000..0afc6fc011764 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeEndpointConfiguration.Serialization.cs @@ -0,0 +1,145 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchNodeEndpointConfiguration : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNodeEndpointConfiguration)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("inboundEndpoints"u8); + writer.WriteStartArray(); + foreach (var item in InboundEndpoints) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchNodeEndpointConfiguration IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNodeEndpointConfiguration)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchNodeEndpointConfiguration(document.RootElement, options); + } + + internal static BatchNodeEndpointConfiguration DeserializeBatchNodeEndpointConfiguration(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + IReadOnlyList inboundEndpoints = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("inboundEndpoints"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(InboundEndpoint.DeserializeInboundEndpoint(item, options)); + } + inboundEndpoints = array; + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchNodeEndpointConfiguration(inboundEndpoints, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchNodeEndpointConfiguration)} does not support writing '{options.Format}' format."); + } + } + + BatchNodeEndpointConfiguration IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchNodeEndpointConfiguration(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchNodeEndpointConfiguration)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchNodeEndpointConfiguration FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchNodeEndpointConfiguration(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeEndpointConfiguration.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeEndpointConfiguration.cs new file mode 100644 index 0000000000000..a42143d4f1b97 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeEndpointConfiguration.cs @@ -0,0 +1,76 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
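A consumption note on the node-level models above: list-typed properties are never null. The BatchNode deserializer substitutes an empty ChangeTrackingList when the service omits recentTasks or errors, whereas object-typed properties such as EndpointConfiguration stay null when absent. A defensive-access sketch; PrintNodeDetails is a hypothetical helper, not part of this library, and assumes a BatchNode obtained from the service:

using System;
using Azure.Compute.Batch;

internal static class NodeDiagnostics
{
    internal static void PrintNodeDetails(BatchNode node)
    {
        // Safe to enumerate without a null check: Errors defaults to an
        // empty list (errors ?? new ChangeTrackingList above).
        foreach (BatchNodeError error in node.Errors)
        {
            Console.WriteLine($"node error {error.Code}: {error.Message}");
        }

        // Single-object properties can genuinely be null, so test first.
        if (node.EndpointConfiguration is not null)
        {
            Console.WriteLine(
                $"{node.EndpointConfiguration.InboundEndpoints.Count} inbound endpoint(s)");
        }
    }
}

BatchNodeError's Code and Message correspond to the code/message fields handled by the BatchNodeError serializer that follows.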
+ +// + +#nullable disable + +using System; +using System.Collections.Generic; +using System.Linq; + +namespace Azure.Compute.Batch +{ + /// The endpoint configuration for the Compute Node. + public partial class BatchNodeEndpointConfiguration + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The list of inbound endpoints that are accessible on the Compute Node. + /// is null. + internal BatchNodeEndpointConfiguration(IEnumerable inboundEndpoints) + { + Argument.AssertNotNull(inboundEndpoints, nameof(inboundEndpoints)); + + InboundEndpoints = inboundEndpoints.ToList(); + } + + /// Initializes a new instance of . + /// The list of inbound endpoints that are accessible on the Compute Node. + /// Keeps track of any properties unknown to the library. + internal BatchNodeEndpointConfiguration(IReadOnlyList inboundEndpoints, IDictionary serializedAdditionalRawData) + { + InboundEndpoints = inboundEndpoints; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchNodeEndpointConfiguration() + { + } + + /// The list of inbound endpoints that are accessible on the Compute Node. + public IReadOnlyList InboundEndpoints { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeError.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeError.Serialization.cs new file mode 100644 index 0000000000000..c3c8b9f8c8eba --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeError.Serialization.cs @@ -0,0 +1,174 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchNodeError : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNodeError)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(Code)) + { + writer.WritePropertyName("code"u8); + writer.WriteStringValue(Code); + } + if (Optional.IsDefined(Message)) + { + writer.WritePropertyName("message"u8); + writer.WriteStringValue(Message); + } + if (Optional.IsCollectionDefined(ErrorDetails)) + { + writer.WritePropertyName("errorDetails"u8); + writer.WriteStartArray(); + foreach (var item in ErrorDetails) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchNodeError IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNodeError)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchNodeError(document.RootElement, options); + } + + internal static BatchNodeError DeserializeBatchNodeError(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string code = default; + string message = default; + IReadOnlyList errorDetails = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("code"u8)) + { + code = property.Value.GetString(); + continue; + } + if (property.NameEquals("message"u8)) + { + message = property.Value.GetString(); + continue; + } + if (property.NameEquals("errorDetails"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(NameValuePair.DeserializeNameValuePair(item, options)); + } + errorDetails = array; + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchNodeError(code, message, errorDetails ?? new ChangeTrackingList(), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchNodeError)} does not support writing '{options.Format}' format."); + } + } + + BatchNodeError IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchNodeError(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchNodeError)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchNodeError FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchNodeError(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeError.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeError.cs new file mode 100644 index 0000000000000..b2b7ddff0a13c --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeError.cs @@ -0,0 +1,74 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// An error encountered by a Compute Node. + public partial class BatchNodeError + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + internal BatchNodeError() + { + ErrorDetails = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// An identifier for the Compute Node error. Codes are invariant and are intended to be consumed programmatically. + /// A message describing the Compute Node error, intended to be suitable for display in a user interface. + /// The list of additional error details related to the Compute Node error. + /// Keeps track of any properties unknown to the library. + internal BatchNodeError(string code, string message, IReadOnlyList errorDetails, IDictionary serializedAdditionalRawData) + { + Code = code; + Message = message; + ErrorDetails = errorDetails; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// An identifier for the Compute Node error. Codes are invariant and are intended to be consumed programmatically. + public string Code { get; } + /// A message describing the Compute Node error, intended to be suitable for display in a user interface. + public string Message { get; } + /// The list of additional error details related to the Compute Node error. 
+        public IReadOnlyList<NameValuePair> ErrorDetails { get; }
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeFile.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeFile.Serialization.cs
new file mode 100644
index 0000000000000..16ea064af291e
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeFile.Serialization.cs
@@ -0,0 +1,179 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.ClientModel.Primitives;
+using System.Collections.Generic;
+using System.Text.Json;
+using Azure.Core;
+
+namespace Azure.Compute.Batch
+{
+    public partial class BatchNodeFile : IUtf8JsonSerializable, IJsonModel<BatchNodeFile>
+    {
+        void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel<BatchNodeFile>)this).Write(writer, ModelSerializationExtensions.WireOptions);
+
+        void IJsonModel<BatchNodeFile>.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ? ((IPersistableModel<BatchNodeFile>)this).GetFormatFromOptions(options) : options.Format;
+            if (format != "J")
+            {
+                throw new FormatException($"The model {nameof(BatchNodeFile)} does not support writing '{format}' format.");
+            }
+
+            writer.WriteStartObject();
+            if (Optional.IsDefined(Name))
+            {
+                writer.WritePropertyName("name"u8);
+                writer.WriteStringValue(Name);
+            }
+            if (Optional.IsDefined(Url))
+            {
+                writer.WritePropertyName("url"u8);
+                writer.WriteStringValue(Url);
+            }
+            if (Optional.IsDefined(IsDirectory))
+            {
+                writer.WritePropertyName("isDirectory"u8);
+                writer.WriteBooleanValue(IsDirectory.Value);
+            }
+            if (Optional.IsDefined(Properties))
+            {
+                writer.WritePropertyName("properties"u8);
+                writer.WriteObjectValue(Properties, options);
+            }
+            if (options.Format != "W" && _serializedAdditionalRawData != null)
+            {
+                foreach (var item in _serializedAdditionalRawData)
+                {
+                    writer.WritePropertyName(item.Key);
+#if NET6_0_OR_GREATER
+                    writer.WriteRawValue(item.Value);
+#else
+                    using (JsonDocument document = JsonDocument.Parse(item.Value))
+                    {
+                        JsonSerializer.Serialize(writer, document.RootElement);
+                    }
+#endif
+                }
+            }
+            writer.WriteEndObject();
+        }
+
+        BatchNodeFile IJsonModel<BatchNodeFile>.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ? ((IPersistableModel<BatchNodeFile>)this).GetFormatFromOptions(options) : options.Format;
+            if (format != "J")
+            {
+                throw new FormatException($"The model {nameof(BatchNodeFile)} does not support reading '{format}' format.");
+            }
+
+            using JsonDocument document = JsonDocument.ParseValue(ref reader);
+            return DeserializeBatchNodeFile(document.RootElement, options);
+        }
+
+        internal static BatchNodeFile DeserializeBatchNodeFile(JsonElement element, ModelReaderWriterOptions options = null)
+        {
+            options ??= ModelSerializationExtensions.WireOptions;
+
+            if (element.ValueKind == JsonValueKind.Null)
+            {
+                return null;
+            }
+            string name = default;
+            string url = default;
+            bool?
isDirectory = default; + FileProperties properties = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("name"u8)) + { + name = property.Value.GetString(); + continue; + } + if (property.NameEquals("url"u8)) + { + url = property.Value.GetString(); + continue; + } + if (property.NameEquals("isDirectory"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + isDirectory = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("properties"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + properties = FileProperties.DeserializeFileProperties(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchNodeFile(name, url, isDirectory, properties, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchNodeFile)} does not support writing '{options.Format}' format."); + } + } + + BatchNodeFile IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchNodeFile(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchNodeFile)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchNodeFile FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchNodeFile(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeFile.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeFile.cs new file mode 100644 index 0000000000000..8e04bac0ed493 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeFile.cs @@ -0,0 +1,77 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Information about a file or directory on a Compute Node. + public partial class BatchNodeFile + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . 
+        /// </para>
+        /// <para>
+        /// Examples:
+        /// <list type="bullet">
+        /// <item>
+        /// <term>BinaryData.FromObjectAsJson("foo")</term>
+        /// <description>Creates a payload of "foo".</description>
+        /// </item>
+        /// <item>
+        /// <term>BinaryData.FromString("\"foo\"")</term>
+        /// <description>Creates a payload of "foo".</description>
+        /// </item>
+        /// <item>
+        /// <term>BinaryData.FromObjectAsJson(new { key = "value" })</term>
+        /// <description>Creates a payload of { "key": "value" }.</description>
+        /// </item>
+        /// <item>
+        /// <term>BinaryData.FromString("{\"key\": \"value\"}")</term>
+        /// <description>Creates a payload of { "key": "value" }.</description>
+        /// </item>
+        /// </list>
+        /// </para>
+        /// </summary>
+        private IDictionary<string, BinaryData> _serializedAdditionalRawData;
+
+        /// <summary> Initializes a new instance of <see cref="BatchNodeFile"/>. </summary>
+        internal BatchNodeFile()
+        {
+        }
+
+        /// <summary> Initializes a new instance of <see cref="BatchNodeFile"/>. </summary>
+        /// <param name="name"> The file path. </param>
+        /// <param name="url"> The URL of the file. </param>
+        /// <param name="isDirectory"> Whether the object represents a directory. </param>
+        /// <param name="properties"> The file properties. </param>
+        /// <param name="serializedAdditionalRawData"> Keeps track of any properties unknown to the library. </param>
+        internal BatchNodeFile(string name, string url, bool? isDirectory, FileProperties properties, IDictionary<string, BinaryData> serializedAdditionalRawData)
+        {
+            Name = name;
+            Url = url;
+            IsDirectory = isDirectory;
+            Properties = properties;
+            _serializedAdditionalRawData = serializedAdditionalRawData;
+        }
+
+        /// <summary> The file path. </summary>
+        public string Name { get; }
+        /// <summary> The URL of the file. </summary>
+        public string Url { get; }
+        /// <summary> Whether the object represents a directory. </summary>
+        public bool? IsDirectory { get; }
+        /// <summary> The file properties. </summary>
+        public FileProperties Properties { get; }
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeFillType.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeFillType.cs
new file mode 100644
index 0000000000000..8b1be261df606
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeFillType.cs
@@ -0,0 +1,51 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.ComponentModel;
+
+namespace Azure.Compute.Batch
+{
+    /// <summary> BatchNodeFillType enums. </summary>
+    public readonly partial struct BatchNodeFillType : IEquatable<BatchNodeFillType>
+    {
+        private readonly string _value;
+
+        /// <summary> Initializes a new instance of <see cref="BatchNodeFillType"/>. </summary>
+        /// <exception cref="ArgumentNullException"> <paramref name="value"/> is null. </exception>
+        public BatchNodeFillType(string value)
+        {
+            _value = value ?? throw new ArgumentNullException(nameof(value));
+        }
+
+        private const string SpreadValue = "spread";
+        private const string PackValue = "pack";
+
+        /// <summary> Tasks should be assigned evenly across all Compute Nodes in the Pool. </summary>
+        public static BatchNodeFillType Spread { get; } = new BatchNodeFillType(SpreadValue);
+        /// <summary> As many Tasks as possible (taskSlotsPerNode) should be assigned to each Compute Node in the Pool before any Tasks are assigned to the next Compute Node in the Pool. </summary>
+        public static BatchNodeFillType Pack { get; } = new BatchNodeFillType(PackValue);
+        /// <summary> Determines if two <see cref="BatchNodeFillType"/> values are the same. </summary>
+        public static bool operator ==(BatchNodeFillType left, BatchNodeFillType right) => left.Equals(right);
+        /// <summary> Determines if two <see cref="BatchNodeFillType"/> values are not the same. </summary>
+        public static bool operator !=(BatchNodeFillType left, BatchNodeFillType right) => !left.Equals(right);
+        /// <summary> Converts a string to a <see cref="BatchNodeFillType"/>. </summary>
+        public static implicit operator BatchNodeFillType(string value) => new BatchNodeFillType(value);
+
+        /// <inheritdoc />
+        [EditorBrowsable(EditorBrowsableState.Never)]
+        public override bool Equals(object obj) => obj is BatchNodeFillType other && Equals(other);
+        /// <inheritdoc />
+        public bool Equals(BatchNodeFillType other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase);
+
+        /// <inheritdoc />
+        [EditorBrowsable(EditorBrowsableState.Never)]
+        public override int GetHashCode() => _value?.GetHashCode() ??
0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeIdentityReference.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeIdentityReference.Serialization.cs new file mode 100644 index 0000000000000..2a9d604213552 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeIdentityReference.Serialization.cs @@ -0,0 +1,138 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchNodeIdentityReference : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNodeIdentityReference)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(ResourceId)) + { + writer.WritePropertyName("resourceId"u8); + writer.WriteStringValue(ResourceId); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchNodeIdentityReference IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNodeIdentityReference)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchNodeIdentityReference(document.RootElement, options); + } + + internal static BatchNodeIdentityReference DeserializeBatchNodeIdentityReference(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string resourceId = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("resourceId"u8)) + { + resourceId = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchNodeIdentityReference(resourceId, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchNodeIdentityReference)} does not support writing '{options.Format}' format."); + } + } + + BatchNodeIdentityReference IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchNodeIdentityReference(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchNodeIdentityReference)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchNodeIdentityReference FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchNodeIdentityReference(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeIdentityReference.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeIdentityReference.cs new file mode 100644 index 0000000000000..1c421ab9689c1 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeIdentityReference.cs @@ -0,0 +1,68 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// + /// The reference to a user assigned identity associated with the Batch pool which + /// a compute node will use. + /// + public partial class BatchNodeIdentityReference + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public BatchNodeIdentityReference() + { + } + + /// Initializes a new instance of . + /// The ARM resource id of the user assigned identity. + /// Keeps track of any properties unknown to the library. + internal BatchNodeIdentityReference(string resourceId, IDictionary serializedAdditionalRawData) + { + ResourceId = resourceId; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The ARM resource id of the user assigned identity. 
+ public string ResourceId { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeInfo.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeInfo.Serialization.cs new file mode 100644 index 0000000000000..e15bde2933332 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeInfo.Serialization.cs @@ -0,0 +1,200 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchNodeInfo : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNodeInfo)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(AffinityId)) + { + writer.WritePropertyName("affinityId"u8); + writer.WriteStringValue(AffinityId); + } + if (Optional.IsDefined(NodeUrl)) + { + writer.WritePropertyName("nodeUrl"u8); + writer.WriteStringValue(NodeUrl); + } + if (Optional.IsDefined(PoolId)) + { + writer.WritePropertyName("poolId"u8); + writer.WriteStringValue(PoolId); + } + if (Optional.IsDefined(NodeId)) + { + writer.WritePropertyName("nodeId"u8); + writer.WriteStringValue(NodeId); + } + if (Optional.IsDefined(TaskRootDirectory)) + { + writer.WritePropertyName("taskRootDirectory"u8); + writer.WriteStringValue(TaskRootDirectory); + } + if (Optional.IsDefined(TaskRootDirectoryUrl)) + { + writer.WritePropertyName("taskRootDirectoryUrl"u8); + writer.WriteStringValue(TaskRootDirectoryUrl); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchNodeInfo IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNodeInfo)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchNodeInfo(document.RootElement, options); + } + + internal static BatchNodeInfo DeserializeBatchNodeInfo(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string affinityId = default; + string nodeUrl = default; + string poolId = default; + string nodeId = default; + string taskRootDirectory = default; + string taskRootDirectoryUrl = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("affinityId"u8)) + { + affinityId = property.Value.GetString(); + continue; + } + if (property.NameEquals("nodeUrl"u8)) + { + nodeUrl = property.Value.GetString(); + continue; + } + if (property.NameEquals("poolId"u8)) + { + poolId = property.Value.GetString(); + continue; + } + if (property.NameEquals("nodeId"u8)) + { + nodeId = property.Value.GetString(); + continue; + } + if (property.NameEquals("taskRootDirectory"u8)) + { + taskRootDirectory = property.Value.GetString(); + continue; + } + if (property.NameEquals("taskRootDirectoryUrl"u8)) + { + taskRootDirectoryUrl = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchNodeInfo( + affinityId, + nodeUrl, + poolId, + nodeId, + taskRootDirectory, + taskRootDirectoryUrl, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchNodeInfo)} does not support writing '{options.Format}' format."); + } + } + + BatchNodeInfo IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchNodeInfo(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchNodeInfo)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchNodeInfo FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchNodeInfo(document.RootElement); + } + + /// Convert into a . 
+ internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeInfo.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeInfo.cs new file mode 100644 index 0000000000000..bc11497d21557 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeInfo.cs @@ -0,0 +1,85 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Information about the Compute Node on which a Task ran. + public partial class BatchNodeInfo + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public BatchNodeInfo() + { + } + + /// Initializes a new instance of . + /// An identifier for the Node on which the Task ran, which can be passed when adding a Task to request that the Task be scheduled on this Compute Node. + /// The URL of the Compute Node on which the Task ran. + /// The ID of the Pool on which the Task ran. + /// The ID of the Compute Node on which the Task ran. + /// The root directory of the Task on the Compute Node. + /// The URL to the root directory of the Task on the Compute Node. + /// Keeps track of any properties unknown to the library. + internal BatchNodeInfo(string affinityId, string nodeUrl, string poolId, string nodeId, string taskRootDirectory, string taskRootDirectoryUrl, IDictionary serializedAdditionalRawData) + { + AffinityId = affinityId; + NodeUrl = nodeUrl; + PoolId = poolId; + NodeId = nodeId; + TaskRootDirectory = taskRootDirectory; + TaskRootDirectoryUrl = taskRootDirectoryUrl; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// An identifier for the Node on which the Task ran, which can be passed when adding a Task to request that the Task be scheduled on this Compute Node. + public string AffinityId { get; set; } + /// The URL of the Compute Node on which the Task ran. + public string NodeUrl { get; set; } + /// The ID of the Pool on which the Task ran. + public string PoolId { get; set; } + /// The ID of the Compute Node on which the Task ran. + public string NodeId { get; set; } + /// The root directory of the Task on the Compute Node. + public string TaskRootDirectory { get; set; } + /// The URL to the root directory of the Task on the Compute Node. 
+ public string TaskRootDirectoryUrl { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodePlacementConfiguration.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodePlacementConfiguration.Serialization.cs new file mode 100644 index 0000000000000..cfb6cad983dd2 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodePlacementConfiguration.Serialization.cs @@ -0,0 +1,142 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchNodePlacementConfiguration : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNodePlacementConfiguration)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(Policy)) + { + writer.WritePropertyName("policy"u8); + writer.WriteStringValue(Policy.Value.ToString()); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchNodePlacementConfiguration IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNodePlacementConfiguration)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchNodePlacementConfiguration(document.RootElement, options); + } + + internal static BatchNodePlacementConfiguration DeserializeBatchNodePlacementConfiguration(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + BatchNodePlacementPolicyType? policy = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("policy"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + policy = new BatchNodePlacementPolicyType(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchNodePlacementConfiguration(policy, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchNodePlacementConfiguration)} does not support writing '{options.Format}' format."); + } + } + + BatchNodePlacementConfiguration IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchNodePlacementConfiguration(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchNodePlacementConfiguration)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchNodePlacementConfiguration FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchNodePlacementConfiguration(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodePlacementConfiguration.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodePlacementConfiguration.cs new file mode 100644 index 0000000000000..450afff846fd3 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodePlacementConfiguration.cs @@ -0,0 +1,69 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// + /// For regional placement, nodes in the pool will be allocated in the same region. + /// For zonal placement, nodes in the pool will be spread across different zones + /// with best effort balancing. + /// + public partial class BatchNodePlacementConfiguration + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public BatchNodePlacementConfiguration() + { + } + + /// Initializes a new instance of . + /// Node placement Policy type on Batch Pools. Allocation policy used by Batch Service to provision the nodes. If not specified, Batch will use the regional policy. + /// Keeps track of any properties unknown to the library. + internal BatchNodePlacementConfiguration(BatchNodePlacementPolicyType? 
policy, IDictionary<string, BinaryData> serializedAdditionalRawData)
+        {
+            Policy = policy;
+            _serializedAdditionalRawData = serializedAdditionalRawData;
+        }
+
+        /// <summary> Node placement Policy type on Batch Pools. Allocation policy used by Batch Service to provision the nodes. If not specified, Batch will use the regional policy. </summary>
+        public BatchNodePlacementPolicyType? Policy { get; set; }
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodePlacementPolicyType.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodePlacementPolicyType.cs
new file mode 100644
index 0000000000000..86fdd3f542e4b
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodePlacementPolicyType.cs
@@ -0,0 +1,51 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.ComponentModel;
+
+namespace Azure.Compute.Batch
+{
+    /// <summary> BatchNodePlacementPolicyType enums. </summary>
+    public readonly partial struct BatchNodePlacementPolicyType : IEquatable<BatchNodePlacementPolicyType>
+    {
+        private readonly string _value;
+
+        /// <summary> Initializes a new instance of <see cref="BatchNodePlacementPolicyType"/>. </summary>
+        /// <exception cref="ArgumentNullException"> <paramref name="value"/> is null. </exception>
+        public BatchNodePlacementPolicyType(string value)
+        {
+            _value = value ?? throw new ArgumentNullException(nameof(value));
+        }
+
+        private const string RegionalValue = "regional";
+        private const string ZonalValue = "zonal";
+
+        /// <summary> All nodes in the pool will be allocated in the same region. </summary>
+        public static BatchNodePlacementPolicyType Regional { get; } = new BatchNodePlacementPolicyType(RegionalValue);
+        /// <summary> Nodes in the pool will be spread across different availability zones with best effort balancing. </summary>
+        public static BatchNodePlacementPolicyType Zonal { get; } = new BatchNodePlacementPolicyType(ZonalValue);
+        /// <summary> Determines if two <see cref="BatchNodePlacementPolicyType"/> values are the same. </summary>
+        public static bool operator ==(BatchNodePlacementPolicyType left, BatchNodePlacementPolicyType right) => left.Equals(right);
+        /// <summary> Determines if two <see cref="BatchNodePlacementPolicyType"/> values are not the same. </summary>
+        public static bool operator !=(BatchNodePlacementPolicyType left, BatchNodePlacementPolicyType right) => !left.Equals(right);
+        /// <summary> Converts a string to a <see cref="BatchNodePlacementPolicyType"/>. </summary>
+        public static implicit operator BatchNodePlacementPolicyType(string value) => new BatchNodePlacementPolicyType(value);
+
+        /// <inheritdoc />
+        [EditorBrowsable(EditorBrowsableState.Never)]
+        public override bool Equals(object obj) => obj is BatchNodePlacementPolicyType other && Equals(other);
+        /// <inheritdoc />
+        public bool Equals(BatchNodePlacementPolicyType other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase);
+
+        /// <inheritdoc />
+        [EditorBrowsable(EditorBrowsableState.Never)]
+        public override int GetHashCode() => _value?.GetHashCode() ?? 0;
+        /// <inheritdoc />
+        public override string ToString() => _value;
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRebootContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRebootContent.Serialization.cs
new file mode 100644
index 0000000000000..73dc3dfeb3de4
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRebootContent.Serialization.cs
@@ -0,0 +1,142 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
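// Illustrative usage (a sketch, not generated code): BatchNodeRebootContent is the optional
// request body for the reboot-node operation, carrying the BatchNodeRebootOption defined
// later in this PR. The client call named below is hypothetical and may differ from the
// actual service method:
//
//   var rebootContent = new BatchNodeRebootContent
//   {
//       NodeRebootOption = BatchNodeRebootOption.TaskCompletion
//   };
//   // batchClient.RebootNode("poolId", "nodeId", rebootContent);  // hypothetical method name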
+ +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchNodeRebootContent : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNodeRebootContent)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(NodeRebootOption)) + { + writer.WritePropertyName("nodeRebootOption"u8); + writer.WriteStringValue(NodeRebootOption.Value.ToString()); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchNodeRebootContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNodeRebootContent)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchNodeRebootContent(document.RootElement, options); + } + + internal static BatchNodeRebootContent DeserializeBatchNodeRebootContent(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + BatchNodeRebootOption? nodeRebootOption = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("nodeRebootOption"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + nodeRebootOption = new BatchNodeRebootOption(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchNodeRebootContent(nodeRebootOption, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchNodeRebootContent)} does not support writing '{options.Format}' format."); + } + } + + BatchNodeRebootContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchNodeRebootContent(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchNodeRebootContent)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchNodeRebootContent FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchNodeRebootContent(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRebootContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRebootContent.cs new file mode 100644 index 0000000000000..2202761043ae7 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRebootContent.cs @@ -0,0 +1,65 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Parameters for rebooting an Azure Batch Compute Node. + public partial class BatchNodeRebootContent + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public BatchNodeRebootContent() + { + } + + /// Initializes a new instance of . + /// When to reboot the Compute Node and what to do with currently running Tasks. The default value is requeue. + /// Keeps track of any properties unknown to the library. + internal BatchNodeRebootContent(BatchNodeRebootOption? nodeRebootOption, IDictionary serializedAdditionalRawData) + { + NodeRebootOption = nodeRebootOption; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// When to reboot the Compute Node and what to do with currently running Tasks. The default value is requeue. + public BatchNodeRebootOption? NodeRebootOption { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRebootOption.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRebootOption.cs new file mode 100644 index 0000000000000..a8004a8dd490a --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRebootOption.cs @@ -0,0 +1,57 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
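// Illustrative usage (a sketch, not generated code): like the other extensible enums in
// this PR, BatchNodeRebootOption converts implicitly from string and compares its value
// case-insensitively, so values unknown to this library version still round-trip:
//
//   BatchNodeRebootOption option = "REQUEUE";                  // implicit conversion
//   bool isRequeue = option == BatchNodeRebootOption.Requeue;  // true; comparison ignores case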
+ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// BatchNodeRebootOption enums. + public readonly partial struct BatchNodeRebootOption : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public BatchNodeRebootOption(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string RequeueValue = "requeue"; + private const string TerminateValue = "terminate"; + private const string TaskCompletionValue = "taskcompletion"; + private const string RetainedDataValue = "retaineddata"; + + /// Terminate running Task processes and requeue the Tasks. The Tasks will run again when a Compute Node is available. Restart the Compute Node as soon as Tasks have been terminated. + public static BatchNodeRebootOption Requeue { get; } = new BatchNodeRebootOption(RequeueValue); + /// Terminate running Tasks. The Tasks will be completed with failureInfo indicating that they were terminated, and will not run again. Restart the Compute Node as soon as Tasks have been terminated. + public static BatchNodeRebootOption Terminate { get; } = new BatchNodeRebootOption(TerminateValue); + /// Allow currently running Tasks to complete. Schedule no new Tasks while waiting. Restart the Compute Node when all Tasks have completed. + public static BatchNodeRebootOption TaskCompletion { get; } = new BatchNodeRebootOption(TaskCompletionValue); + /// Allow currently running Tasks to complete, then wait for all Task data retention periods to expire. Schedule no new Tasks while waiting. Restart the Compute Node when all Task retention periods have expired. + public static BatchNodeRebootOption RetainedData { get; } = new BatchNodeRebootOption(RetainedDataValue); + /// Determines if two values are the same. + public static bool operator ==(BatchNodeRebootOption left, BatchNodeRebootOption right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(BatchNodeRebootOption left, BatchNodeRebootOption right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator BatchNodeRebootOption(string value) => new BatchNodeRebootOption(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is BatchNodeRebootOption other && Equals(other); + /// + public bool Equals(BatchNodeRebootOption other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoteLoginSettings.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoteLoginSettings.Serialization.cs new file mode 100644 index 0000000000000..f9aec5b2571b6 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoteLoginSettings.Serialization.cs @@ -0,0 +1,143 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
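// Illustrative usage (a sketch, not generated code): note that the wire property
// "remoteLoginIPAddress" maps to the .NET property RemoteLoginIpAddress. The payload below
// is a hypothetical example, not taken from this PR:
//
//   BinaryData json = BinaryData.FromString("{\"remoteLoginIPAddress\":\"10.0.0.4\",\"remoteLoginPort\":50000}");
//   BatchNodeRemoteLoginSettings login = ModelReaderWriter.Read<BatchNodeRemoteLoginSettings>(json);
//   Console.WriteLine($"{login.RemoteLoginIpAddress}:{login.RemoteLoginPort}");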
+ +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchNodeRemoteLoginSettings : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNodeRemoteLoginSettings)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("remoteLoginIPAddress"u8); + writer.WriteStringValue(RemoteLoginIpAddress); + writer.WritePropertyName("remoteLoginPort"u8); + writer.WriteNumberValue(RemoteLoginPort); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchNodeRemoteLoginSettings IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNodeRemoteLoginSettings)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchNodeRemoteLoginSettings(document.RootElement, options); + } + + internal static BatchNodeRemoteLoginSettings DeserializeBatchNodeRemoteLoginSettings(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string remoteLoginIPAddress = default; + int remoteLoginPort = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("remoteLoginIPAddress"u8)) + { + remoteLoginIPAddress = property.Value.GetString(); + continue; + } + if (property.NameEquals("remoteLoginPort"u8)) + { + remoteLoginPort = property.Value.GetInt32(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchNodeRemoteLoginSettings(remoteLoginIPAddress, remoteLoginPort, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchNodeRemoteLoginSettings)} does not support writing '{options.Format}' format."); + } + } + + BatchNodeRemoteLoginSettings IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchNodeRemoteLoginSettings(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchNodeRemoteLoginSettings)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchNodeRemoteLoginSettings FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchNodeRemoteLoginSettings(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoteLoginSettings.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoteLoginSettings.cs new file mode 100644 index 0000000000000..3b66830690ae8 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoteLoginSettings.cs @@ -0,0 +1,81 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// The remote login settings for a Compute Node. + public partial class BatchNodeRemoteLoginSettings + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The IP address used for remote login to the Compute Node. + /// The port used for remote login to the Compute Node. + /// is null. + internal BatchNodeRemoteLoginSettings(string remoteLoginIpAddress, int remoteLoginPort) + { + Argument.AssertNotNull(remoteLoginIpAddress, nameof(remoteLoginIpAddress)); + + RemoteLoginIpAddress = remoteLoginIpAddress; + RemoteLoginPort = remoteLoginPort; + } + + /// Initializes a new instance of . + /// The IP address used for remote login to the Compute Node. + /// The port used for remote login to the Compute Node. 
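`BatchNodeRemoteLoginSettings` is an output-only model (internal constructors, get-only properties) that the serializer above populates from the service's JSON, mapping the wire name `remoteLoginIPAddress` to the .NET property `RemoteLoginIpAddress`. A hedged sketch of rehydrating the model from raw JSON with `System.ClientModel`'s `ModelReaderWriter`, assuming the usual generated-model support for non-public constructors:

```C#
using System;
using System.ClientModel.Primitives;
using Azure.Compute.Batch;

class RemoteLoginSettingsDemo
{
    static void Main()
    {
        // Sample wire payload in the shape the deserializer expects.
        BinaryData json = BinaryData.FromString(
            "{\"remoteLoginIPAddress\":\"10.0.0.4\",\"remoteLoginPort\":50000}");

        BatchNodeRemoteLoginSettings settings =
            ModelReaderWriter.Read<BatchNodeRemoteLoginSettings>(json);

        Console.WriteLine($"{settings.RemoteLoginIpAddress}:{settings.RemoteLoginPort}");
        // 10.0.0.4:50000
    }
}
```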
+ /// Keeps track of any properties unknown to the library. + internal BatchNodeRemoteLoginSettings(string remoteLoginIpAddress, int remoteLoginPort, IDictionary serializedAdditionalRawData) + { + RemoteLoginIpAddress = remoteLoginIpAddress; + RemoteLoginPort = remoteLoginPort; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchNodeRemoteLoginSettings() + { + } + + /// The IP address used for remote login to the Compute Node. + public string RemoteLoginIpAddress { get; } + /// The port used for remote login to the Compute Node. + public int RemoteLoginPort { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoveContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoveContent.Serialization.cs new file mode 100644 index 0000000000000..3828c539a2702 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoveContent.Serialization.cs @@ -0,0 +1,175 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchNodeRemoveContent : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNodeRemoveContent)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("nodeList"u8); + writer.WriteStartArray(); + foreach (var item in NodeList) + { + writer.WriteStringValue(item); + } + writer.WriteEndArray(); + if (Optional.IsDefined(ResizeTimeout)) + { + writer.WritePropertyName("resizeTimeout"u8); + writer.WriteStringValue(ResizeTimeout.Value, "P"); + } + if (Optional.IsDefined(NodeDeallocationOption)) + { + writer.WritePropertyName("nodeDeallocationOption"u8); + writer.WriteStringValue(NodeDeallocationOption.Value.ToString()); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchNodeRemoveContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNodeRemoveContent)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchNodeRemoveContent(document.RootElement, options); + } + + internal static BatchNodeRemoveContent DeserializeBatchNodeRemoveContent(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + IList nodeList = default; + TimeSpan? resizeTimeout = default; + BatchNodeDeallocationOption? nodeDeallocationOption = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("nodeList"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(item.GetString()); + } + nodeList = array; + continue; + } + if (property.NameEquals("resizeTimeout"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + resizeTimeout = property.Value.GetTimeSpan("P"); + continue; + } + if (property.NameEquals("nodeDeallocationOption"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + nodeDeallocationOption = new BatchNodeDeallocationOption(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchNodeRemoveContent(nodeList, resizeTimeout, nodeDeallocationOption, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchNodeRemoveContent)} does not support writing '{options.Format}' format."); + } + } + + BatchNodeRemoveContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchNodeRemoveContent(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchNodeRemoveContent)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchNodeRemoveContent FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchNodeRemoveContent(document.RootElement); + } + + /// Convert into a . 
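A recurring convention in these serializers: format `"J"` means the caller explicitly asked for JSON, while `"W"` ("wire") defers to the model's default, which `GetFormatFromOptions` resolves to `"J"` for all of these types. A small sketch showing the two options producing the same payload for this model (assuming no unknown passthrough properties are attached):

```C#
using System;
using System.ClientModel.Primitives;
using Azure.Compute.Batch;

class FormatOptionsDemo
{
    static void Main()
    {
        var content = new BatchNodeRemoveContent(new[] { "tvmps_node1" });

        // "J": explicit JSON format.
        BinaryData asJson = ModelReaderWriter.Write(content, new ModelReaderWriterOptions("J"));

        // "W": defer to the model's wire format, resolved to "J" above.
        BinaryData asWire = ModelReaderWriter.Write(content, new ModelReaderWriterOptions("W"));

        Console.WriteLine(asJson.ToString() == asWire.ToString()); // True
    }
}
```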
+ internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoveContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoveContent.cs new file mode 100644 index 0000000000000..ba28ad62da22e --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoveContent.cs @@ -0,0 +1,84 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; +using System.Linq; + +namespace Azure.Compute.Batch +{ + /// Parameters for removing nodes from an Azure Batch Pool. + public partial class BatchNodeRemoveContent + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// A list containing the IDs of the Compute Nodes to be removed from the specified Pool. A maximum of 100 nodes may be removed per request. + /// is null. + public BatchNodeRemoveContent(IEnumerable nodeList) + { + Argument.AssertNotNull(nodeList, nameof(nodeList)); + + NodeList = nodeList.ToList(); + } + + /// Initializes a new instance of . + /// A list containing the IDs of the Compute Nodes to be removed from the specified Pool. A maximum of 100 nodes may be removed per request. + /// The timeout for removal of Compute Nodes to the Pool. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + /// Determines what to do with a Compute Node and its running task(s) after it has been selected for deallocation. The default value is requeue. + /// Keeps track of any properties unknown to the library. + internal BatchNodeRemoveContent(IList nodeList, TimeSpan? resizeTimeout, BatchNodeDeallocationOption? nodeDeallocationOption, IDictionary serializedAdditionalRawData) + { + NodeList = nodeList; + ResizeTimeout = resizeTimeout; + NodeDeallocationOption = nodeDeallocationOption; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchNodeRemoveContent() + { + } + + /// A list containing the IDs of the Compute Nodes to be removed from the specified Pool. A maximum of 100 nodes may be removed per request. + public IList NodeList { get; } + /// The timeout for removal of Compute Nodes to the Pool. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). 
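The doc comments on this model encode real service limits: at most 100 node IDs per request, and a `resizeTimeout` below 5 minutes is rejected with HTTP 400. A sketch of building a request that respects them; the deallocation value's wire string comes from the docs here ("requeue" is the default), while named static members of `BatchNodeDeallocationOption` are assumed from the enum pattern rather than shown in this diff:

```C#
using System;
using System.Collections.Generic;
using Azure.Compute.Batch;

static class RemoveNodesDemo
{
    static BatchNodeRemoveContent Build(IReadOnlyList<string> nodeIds)
    {
        // The service caps a single remove request at 100 Compute Nodes.
        if (nodeIds.Count > 100)
            throw new ArgumentException("At most 100 nodes per request.", nameof(nodeIds));

        return new BatchNodeRemoveContent(nodeIds)
        {
            // Must be >= 5 minutes or the service returns 400 (Bad Request).
            ResizeTimeout = TimeSpan.FromMinutes(10),
            // Extensible enum: constructed from its wire string here.
            NodeDeallocationOption = new BatchNodeDeallocationOption("taskcompletion"),
        };
    }
}
```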
+ public TimeSpan? ResizeTimeout { get; set; } + /// Determines what to do with a Compute Node and its running task(s) after it has been selected for deallocation. The default value is requeue. + public BatchNodeDeallocationOption? NodeDeallocationOption { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeState.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeState.cs new file mode 100644 index 0000000000000..5eced04ff1268 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeState.cs @@ -0,0 +1,87 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// BatchNodeState enums. + public readonly partial struct BatchNodeState : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public BatchNodeState(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string IdleValue = "idle"; + private const string RebootingValue = "rebooting"; + private const string ReimagingValue = "reimaging"; + private const string RunningValue = "running"; + private const string UnusableValue = "unusable"; + private const string CreatingValue = "creating"; + private const string StartingValue = "starting"; + private const string WaitingForStartTaskValue = "waitingforstarttask"; + private const string StartTaskFailedValue = "starttaskfailed"; + private const string UnknownValue = "unknown"; + private const string LeavingPoolValue = "leavingpool"; + private const string OfflineValue = "offline"; + private const string PreemptedValue = "preempted"; + private const string UpgradingOSValue = "upgradingos"; + + /// The Compute Node is not currently running a Task. + public static BatchNodeState Idle { get; } = new BatchNodeState(IdleValue); + /// The Compute Node is rebooting. + public static BatchNodeState Rebooting { get; } = new BatchNodeState(RebootingValue); + /// The Compute Node is reimaging. + public static BatchNodeState Reimaging { get; } = new BatchNodeState(ReimagingValue); + /// The Compute Node is running one or more Tasks (other than a StartTask). + public static BatchNodeState Running { get; } = new BatchNodeState(RunningValue); + /// The Compute Node cannot be used for Task execution due to errors. + public static BatchNodeState Unusable { get; } = new BatchNodeState(UnusableValue); + /// The Batch service has obtained the underlying virtual machine from Azure Compute, but it has not yet started to join the Pool. + public static BatchNodeState Creating { get; } = new BatchNodeState(CreatingValue); + /// The Batch service is starting on the underlying virtual machine. + public static BatchNodeState Starting { get; } = new BatchNodeState(StartingValue); + /// The StartTask has started running on the Compute Node, but waitForSuccess is set and the StartTask has not yet completed. + public static BatchNodeState WaitingForStartTask { get; } = new BatchNodeState(WaitingForStartTaskValue); + /// The StartTask has failed on the Compute Node (and exhausted all retries), and waitForSuccess is set. The Compute Node is not usable for running Tasks. + public static BatchNodeState StartTaskFailed { get; } = new BatchNodeState(StartTaskFailedValue); + /// The Batch service has lost contact with the Compute Node, and does not know its true state. 
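`BatchNodeState` enumerates the full node lifecycle, from `creating` through `leavingpool`. Because it shares the extensible-enum shape, comparisons are case-insensitive and work directly against strings returned by the service. A small sketch of gating scheduling decisions on it:

```C#
using System;
using Azure.Compute.Batch;

static class NodeStateDemo
{
    // Illustrative policy: treat idle/running nodes as able to take new Tasks.
    static bool IsSchedulable(BatchNodeState state) =>
        state == BatchNodeState.Idle || state == BatchNodeState.Running;

    static void Main()
    {
        // Implicit string conversion plus case-insensitive equality.
        Console.WriteLine(IsSchedulable("IDLE"));                 // True
        Console.WriteLine(IsSchedulable(BatchNodeState.Offline)); // False
    }
}
```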
+ public static BatchNodeState Unknown { get; } = new BatchNodeState(UnknownValue); + /// The Compute Node is leaving the Pool, either because the user explicitly removed it or because the Pool is resizing or autoscaling down. + public static BatchNodeState LeavingPool { get; } = new BatchNodeState(LeavingPoolValue); + /// The Compute Node is not currently running a Task, and scheduling of new Tasks to the Compute Node is disabled. + public static BatchNodeState Offline { get; } = new BatchNodeState(OfflineValue); + /// The Spot/Low-priority Compute Node has been preempted. Tasks which were running on the Compute Node when it was preempted will be rescheduled when another Compute Node becomes available. + public static BatchNodeState Preempted { get; } = new BatchNodeState(PreemptedValue); + /// The Compute Node is undergoing an OS upgrade operation. + public static BatchNodeState UpgradingOS { get; } = new BatchNodeState(UpgradingOSValue); + /// Determines if two values are the same. + public static bool operator ==(BatchNodeState left, BatchNodeState right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(BatchNodeState left, BatchNodeState right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator BatchNodeState(string value) => new BatchNodeState(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is BatchNodeState other && Equals(other); + /// + public bool Equals(BatchNodeState other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserCreateContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserCreateContent.Serialization.cs new file mode 100644 index 0000000000000..d48002248dba9 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserCreateContent.Serialization.cs @@ -0,0 +1,193 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchNodeUserCreateContent : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNodeUserCreateContent)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("name"u8); + writer.WriteStringValue(Name); + if (Optional.IsDefined(IsAdmin)) + { + writer.WritePropertyName("isAdmin"u8); + writer.WriteBooleanValue(IsAdmin.Value); + } + if (Optional.IsDefined(ExpiryTime)) + { + writer.WritePropertyName("expiryTime"u8); + writer.WriteStringValue(ExpiryTime.Value, "O"); + } + if (Optional.IsDefined(Password)) + { + writer.WritePropertyName("password"u8); + writer.WriteStringValue(Password); + } + if (Optional.IsDefined(SshPublicKey)) + { + writer.WritePropertyName("sshPublicKey"u8); + writer.WriteStringValue(SshPublicKey); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchNodeUserCreateContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNodeUserCreateContent)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchNodeUserCreateContent(document.RootElement, options); + } + + internal static BatchNodeUserCreateContent DeserializeBatchNodeUserCreateContent(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string name = default; + bool? isAdmin = default; + DateTimeOffset? expiryTime = default; + string password = default; + string sshPublicKey = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("name"u8)) + { + name = property.Value.GetString(); + continue; + } + if (property.NameEquals("isAdmin"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + isAdmin = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("expiryTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + expiryTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("password"u8)) + { + password = property.Value.GetString(); + continue; + } + if (property.NameEquals("sshPublicKey"u8)) + { + sshPublicKey = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchNodeUserCreateContent( + name, + isAdmin, + expiryTime, + password, + sshPublicKey, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchNodeUserCreateContent)} does not support writing '{options.Format}' format."); + } + } + + BatchNodeUserCreateContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchNodeUserCreateContent(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchNodeUserCreateContent)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchNodeUserCreateContent FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchNodeUserCreateContent(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserCreateContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserCreateContent.cs new file mode 100644 index 0000000000000..0f9fbcc2b1583 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserCreateContent.cs @@ -0,0 +1,91 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Parameters for creating a user account for RDP or SSH access on an Azure Batch Compute Node. + public partial class BatchNodeUserCreateContent + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The user name of the Account. + /// is null. + public BatchNodeUserCreateContent(string name) + { + Argument.AssertNotNull(name, nameof(name)); + + Name = name; + } + + /// Initializes a new instance of . + /// The user name of the Account. + /// Whether the Account should be an administrator on the Compute Node. The default value is false. + /// The time at which the Account should expire. If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day. + /// The password of the Account. 
The password is required for Windows Compute Nodes (those created with 'virtualMachineConfiguration' using a Windows Image reference). For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. + /// The SSH public key that can be used for remote login to the Compute Node. The public key should be compatible with OpenSSH encoding and should be base 64 encoded. This property can be specified only for Linux Compute Nodes. If this is specified for a Windows Compute Node, then the Batch service rejects the request; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + /// Keeps track of any properties unknown to the library. + internal BatchNodeUserCreateContent(string name, bool? isAdmin, DateTimeOffset? expiryTime, string password, string sshPublicKey, IDictionary serializedAdditionalRawData) + { + Name = name; + IsAdmin = isAdmin; + ExpiryTime = expiryTime; + Password = password; + SshPublicKey = sshPublicKey; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchNodeUserCreateContent() + { + } + + /// The user name of the Account. + public string Name { get; } + /// Whether the Account should be an administrator on the Compute Node. The default value is false. + public bool? IsAdmin { get; set; } + /// The time at which the Account should expire. If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day. + public DateTimeOffset? ExpiryTime { get; set; } + /// The password of the Account. The password is required for Windows Compute Nodes (those created with 'virtualMachineConfiguration' using a Windows Image reference). For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. + public string Password { get; set; } + /// The SSH public key that can be used for remote login to the Compute Node. The public key should be compatible with OpenSSH encoding and should be base 64 encoded. This property can be specified only for Linux Compute Nodes. If this is specified for a Windows Compute Node, then the Batch service rejects the request; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + public string SshPublicKey { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserUpdateContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserUpdateContent.Serialization.cs new file mode 100644 index 0000000000000..e0bf13777869d --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserUpdateContent.Serialization.cs @@ -0,0 +1,164 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchNodeUserUpdateContent : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNodeUserUpdateContent)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(Password)) + { + writer.WritePropertyName("password"u8); + writer.WriteStringValue(Password); + } + if (Optional.IsDefined(ExpiryTime)) + { + writer.WritePropertyName("expiryTime"u8); + writer.WriteStringValue(ExpiryTime.Value, "O"); + } + if (Optional.IsDefined(SshPublicKey)) + { + writer.WritePropertyName("sshPublicKey"u8); + writer.WriteStringValue(SshPublicKey); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchNodeUserUpdateContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNodeUserUpdateContent)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchNodeUserUpdateContent(document.RootElement, options); + } + + internal static BatchNodeUserUpdateContent DeserializeBatchNodeUserUpdateContent(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string password = default; + DateTimeOffset? expiryTime = default; + string sshPublicKey = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("password"u8)) + { + password = property.Value.GetString(); + continue; + } + if (property.NameEquals("expiryTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + expiryTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("sshPublicKey"u8)) + { + sshPublicKey = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchNodeUserUpdateContent(password, expiryTime, sshPublicKey, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchNodeUserUpdateContent)} does not support writing '{options.Format}' format."); + } + } + + BatchNodeUserUpdateContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchNodeUserUpdateContent(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchNodeUserUpdateContent)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchNodeUserUpdateContent FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchNodeUserUpdateContent(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserUpdateContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserUpdateContent.cs new file mode 100644 index 0000000000000..8ebac642c3d5c --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserUpdateContent.cs @@ -0,0 +1,73 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Parameters for updating a user account for RDP or SSH access on an Azure Batch Compute Node. + public partial class BatchNodeUserUpdateContent + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public BatchNodeUserUpdateContent() + { + } + + /// Initializes a new instance of . + /// The password of the Account. The password is required for Windows Compute Nodes (those created with 'virtualMachineConfiguration' using a Windows Image reference). For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. If omitted, any existing password is removed. + /// The time at which the Account should expire. If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day. + /// The SSH public key that can be used for remote login to the Compute Node. The public key should be compatible with OpenSSH encoding and should be base 64 encoded. This property can be specified only for Linux Compute Nodes. If this is specified for a Windows Compute Node, then the Batch service rejects the request; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). 
If omitted, any existing SSH public key is removed. + /// Keeps track of any properties unknown to the library. + internal BatchNodeUserUpdateContent(string password, DateTimeOffset? expiryTime, string sshPublicKey, IDictionary serializedAdditionalRawData) + { + Password = password; + ExpiryTime = expiryTime; + SshPublicKey = sshPublicKey; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The password of the Account. The password is required for Windows Compute Nodes (those created with 'virtualMachineConfiguration' using a Windows Image reference). For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. If omitted, any existing password is removed. + public string Password { get; set; } + /// The time at which the Account should expire. If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day. + public DateTimeOffset? ExpiryTime { get; set; } + /// The SSH public key that can be used for remote login to the Compute Node. The public key should be compatible with OpenSSH encoding and should be base 64 encoded. This property can be specified only for Linux Compute Nodes. If this is specified for a Windows Compute Node, then the Batch service rejects the request; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). If omitted, any existing SSH public key is removed. + public string SshPublicKey { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeVMExtension.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeVMExtension.Serialization.cs new file mode 100644 index 0000000000000..fdf316f424931 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeVMExtension.Serialization.cs @@ -0,0 +1,168 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchNodeVMExtension : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNodeVMExtension)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(ProvisioningState)) + { + writer.WritePropertyName("provisioningState"u8); + writer.WriteStringValue(ProvisioningState); + } + if (Optional.IsDefined(VmExtension)) + { + writer.WritePropertyName("vmExtension"u8); + writer.WriteObjectValue(VmExtension, options); + } + if (Optional.IsDefined(InstanceView)) + { + writer.WritePropertyName("instanceView"u8); + writer.WriteObjectValue(InstanceView, options); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchNodeVMExtension IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchNodeVMExtension)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchNodeVMExtension(document.RootElement, options); + } + + internal static BatchNodeVMExtension DeserializeBatchNodeVMExtension(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string provisioningState = default; + VMExtension vmExtension = default; + VMExtensionInstanceView instanceView = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("provisioningState"u8)) + { + provisioningState = property.Value.GetString(); + continue; + } + if (property.NameEquals("vmExtension"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + vmExtension = VMExtension.DeserializeVMExtension(property.Value, options); + continue; + } + if (property.NameEquals("instanceView"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + instanceView = VMExtensionInstanceView.DeserializeVMExtensionInstanceView(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchNodeVMExtension(provisioningState, vmExtension, instanceView, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchNodeVMExtension)} does not support writing '{options.Format}' format."); + } + } + + BatchNodeVMExtension IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchNodeVMExtension(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchNodeVMExtension)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchNodeVMExtension FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchNodeVMExtension(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeVMExtension.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeVMExtension.cs new file mode 100644 index 0000000000000..ef613ba1da9a8 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeVMExtension.cs @@ -0,0 +1,73 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// The configuration for virtual machine extension instance view. + public partial class BatchNodeVMExtension + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + internal BatchNodeVMExtension() + { + } + + /// Initializes a new instance of . + /// The provisioning state of the virtual machine extension. + /// The virtual machine extension. + /// The vm extension instance view. + /// Keeps track of any properties unknown to the library. 
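The `_serializedAdditionalRawData` dictionary documented above is the passthrough mechanism: JSON properties the library does not model are captured on deserialization (for non-wire formats) and re-emitted on serialization, so round-tripping does not silently drop service-side additions. A hedged sketch, passing explicit `"J"` options so the passthrough path is exercised:

```C#
using System;
using System.ClientModel.Primitives;
using Azure.Compute.Batch;

class UnknownPropertyDemo
{
    static void Main()
    {
        var json = BinaryData.FromString(
            "{\"provisioningState\":\"Succeeded\",\"futureProp\":42}");
        var options = new ModelReaderWriterOptions("J");

        // "futureProp" is not a modeled property; with format "J" the
        // deserializer stashes it in the additional-raw-data dictionary.
        BatchNodeVMExtension ext =
            ModelReaderWriter.Read<BatchNodeVMExtension>(json, options);

        // Writing with "J" re-emits it; the wire format ("W") would omit it.
        BinaryData roundTrip = ModelReaderWriter.Write(ext, options);
        Console.WriteLine(roundTrip.ToString().Contains("futureProp")); // True
    }
}
```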
+ internal BatchNodeVMExtension(string provisioningState, VMExtension vmExtension, VMExtensionInstanceView instanceView, IDictionary serializedAdditionalRawData) + { + ProvisioningState = provisioningState; + VmExtension = vmExtension; + InstanceView = instanceView; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The provisioning state of the virtual machine extension. + public string ProvisioningState { get; } + /// The virtual machine extension. + public VMExtension VmExtension { get; } + /// The vm extension instance view. + public VMExtensionInstanceView InstanceView { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPool.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPool.Serialization.cs new file mode 100644 index 0000000000000..c006448060f22 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPool.Serialization.cs @@ -0,0 +1,757 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchPool : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchPool)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (options.Format != "W" && Optional.IsDefined(Id)) + { + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + } + if (options.Format != "W" && Optional.IsDefined(DisplayName)) + { + writer.WritePropertyName("displayName"u8); + writer.WriteStringValue(DisplayName); + } + if (options.Format != "W" && Optional.IsDefined(Url)) + { + writer.WritePropertyName("url"u8); + writer.WriteStringValue(Url); + } + if (options.Format != "W" && Optional.IsDefined(ETag)) + { + writer.WritePropertyName("eTag"u8); + writer.WriteStringValue(ETag); + } + if (options.Format != "W" && Optional.IsDefined(LastModified)) + { + writer.WritePropertyName("lastModified"u8); + writer.WriteStringValue(LastModified.Value, "O"); + } + if (options.Format != "W" && Optional.IsDefined(CreationTime)) + { + writer.WritePropertyName("creationTime"u8); + writer.WriteStringValue(CreationTime.Value, "O"); + } + if (options.Format != "W" && Optional.IsDefined(State)) + { + writer.WritePropertyName("state"u8); + writer.WriteStringValue(State.Value.ToString()); + } + if (options.Format != "W" && Optional.IsDefined(StateTransitionTime)) + { + writer.WritePropertyName("stateTransitionTime"u8); + writer.WriteStringValue(StateTransitionTime.Value, "O"); + } + if (options.Format != "W" && Optional.IsDefined(AllocationState)) + { + writer.WritePropertyName("allocationState"u8); + writer.WriteStringValue(AllocationState.Value.ToString()); + } + if (options.Format != "W" && Optional.IsDefined(AllocationStateTransitionTime)) + { + writer.WritePropertyName("allocationStateTransitionTime"u8); + writer.WriteStringValue(AllocationStateTransitionTime.Value, "O"); + } + if (options.Format != "W" && Optional.IsDefined(VmSize)) + { + 
writer.WritePropertyName("vmSize"u8); + writer.WriteStringValue(VmSize); + } + if (options.Format != "W" && Optional.IsDefined(VirtualMachineConfiguration)) + { + writer.WritePropertyName("virtualMachineConfiguration"u8); + writer.WriteObjectValue(VirtualMachineConfiguration, options); + } + if (options.Format != "W" && Optional.IsDefined(ResizeTimeout)) + { + writer.WritePropertyName("resizeTimeout"u8); + writer.WriteStringValue(ResizeTimeout.Value, "P"); + } + if (options.Format != "W" && Optional.IsCollectionDefined(ResizeErrors)) + { + writer.WritePropertyName("resizeErrors"u8); + writer.WriteStartArray(); + foreach (var item in ResizeErrors) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && Optional.IsCollectionDefined(ResourceTags)) + { + writer.WritePropertyName("resourceTags"u8); + writer.WriteStartObject(); + foreach (var item in ResourceTags) + { + writer.WritePropertyName(item.Key); + writer.WriteStringValue(item.Value); + } + writer.WriteEndObject(); + } + if (options.Format != "W" && Optional.IsDefined(CurrentDedicatedNodes)) + { + writer.WritePropertyName("currentDedicatedNodes"u8); + writer.WriteNumberValue(CurrentDedicatedNodes.Value); + } + if (options.Format != "W" && Optional.IsDefined(CurrentLowPriorityNodes)) + { + writer.WritePropertyName("currentLowPriorityNodes"u8); + writer.WriteNumberValue(CurrentLowPriorityNodes.Value); + } + if (options.Format != "W" && Optional.IsDefined(TargetDedicatedNodes)) + { + writer.WritePropertyName("targetDedicatedNodes"u8); + writer.WriteNumberValue(TargetDedicatedNodes.Value); + } + if (options.Format != "W" && Optional.IsDefined(TargetLowPriorityNodes)) + { + writer.WritePropertyName("targetLowPriorityNodes"u8); + writer.WriteNumberValue(TargetLowPriorityNodes.Value); + } + if (options.Format != "W" && Optional.IsDefined(EnableAutoScale)) + { + writer.WritePropertyName("enableAutoScale"u8); + writer.WriteBooleanValue(EnableAutoScale.Value); + } + if (options.Format != "W" && Optional.IsDefined(AutoScaleFormula)) + { + writer.WritePropertyName("autoScaleFormula"u8); + writer.WriteStringValue(AutoScaleFormula); + } + if (options.Format != "W" && Optional.IsDefined(AutoScaleEvaluationInterval)) + { + writer.WritePropertyName("autoScaleEvaluationInterval"u8); + writer.WriteStringValue(AutoScaleEvaluationInterval.Value, "P"); + } + if (options.Format != "W" && Optional.IsDefined(AutoScaleRun)) + { + writer.WritePropertyName("autoScaleRun"u8); + writer.WriteObjectValue(AutoScaleRun, options); + } + if (options.Format != "W" && Optional.IsDefined(EnableInterNodeCommunication)) + { + writer.WritePropertyName("enableInterNodeCommunication"u8); + writer.WriteBooleanValue(EnableInterNodeCommunication.Value); + } + if (options.Format != "W" && Optional.IsDefined(NetworkConfiguration)) + { + writer.WritePropertyName("networkConfiguration"u8); + writer.WriteObjectValue(NetworkConfiguration, options); + } + if (Optional.IsDefined(StartTask)) + { + writer.WritePropertyName("startTask"u8); + writer.WriteObjectValue(StartTask, options); + } + if (options.Format != "W" && Optional.IsCollectionDefined(ApplicationPackageReferences)) + { + writer.WritePropertyName("applicationPackageReferences"u8); + writer.WriteStartArray(); + foreach (var item in ApplicationPackageReferences) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && Optional.IsDefined(TaskSlotsPerNode)) + { + writer.WritePropertyName("taskSlotsPerNode"u8); + 
writer.WriteNumberValue(TaskSlotsPerNode.Value); + } + if (options.Format != "W" && Optional.IsDefined(TaskSchedulingPolicy)) + { + writer.WritePropertyName("taskSchedulingPolicy"u8); + writer.WriteObjectValue(TaskSchedulingPolicy, options); + } + if (options.Format != "W" && Optional.IsCollectionDefined(UserAccounts)) + { + writer.WritePropertyName("userAccounts"u8); + writer.WriteStartArray(); + foreach (var item in UserAccounts) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && Optional.IsCollectionDefined(Metadata)) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartArray(); + foreach (var item in Metadata) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && Optional.IsDefined(Stats)) + { + writer.WritePropertyName("stats"u8); + writer.WriteObjectValue(Stats, options); + } + if (options.Format != "W" && Optional.IsCollectionDefined(MountConfiguration)) + { + writer.WritePropertyName("mountConfiguration"u8); + writer.WriteStartArray(); + foreach (var item in MountConfiguration) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && Optional.IsDefined(Identity)) + { + writer.WritePropertyName("identity"u8); + writer.WriteObjectValue(Identity, options); + } + if (Optional.IsDefined(TargetNodeCommunicationMode)) + { + writer.WritePropertyName("targetNodeCommunicationMode"u8); + writer.WriteStringValue(TargetNodeCommunicationMode.Value.ToString()); + } + if (options.Format != "W" && Optional.IsDefined(CurrentNodeCommunicationMode)) + { + writer.WritePropertyName("currentNodeCommunicationMode"u8); + writer.WriteStringValue(CurrentNodeCommunicationMode.Value.ToString()); + } + if (Optional.IsDefined(UpgradePolicy)) + { + writer.WritePropertyName("upgradePolicy"u8); + writer.WriteObjectValue(UpgradePolicy, options); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchPool IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchPool)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchPool(document.RootElement, options); + } + + internal static BatchPool DeserializeBatchPool(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + string displayName = default; + string url = default; + string eTag = default; + DateTimeOffset? lastModified = default; + DateTimeOffset? creationTime = default; + BatchPoolState? state = default; + DateTimeOffset? stateTransitionTime = default; + AllocationState? allocationState = default; + DateTimeOffset? 
allocationStateTransitionTime = default; + string vmSize = default; + VirtualMachineConfiguration virtualMachineConfiguration = default; + TimeSpan? resizeTimeout = default; + IReadOnlyList resizeErrors = default; + IReadOnlyDictionary resourceTags = default; + int? currentDedicatedNodes = default; + int? currentLowPriorityNodes = default; + int? targetDedicatedNodes = default; + int? targetLowPriorityNodes = default; + bool? enableAutoScale = default; + string autoScaleFormula = default; + TimeSpan? autoScaleEvaluationInterval = default; + AutoScaleRun autoScaleRun = default; + bool? enableInterNodeCommunication = default; + NetworkConfiguration networkConfiguration = default; + BatchStartTask startTask = default; + IReadOnlyList applicationPackageReferences = default; + int? taskSlotsPerNode = default; + BatchTaskSchedulingPolicy taskSchedulingPolicy = default; + IReadOnlyList userAccounts = default; + IReadOnlyList metadata = default; + BatchPoolStatistics stats = default; + IReadOnlyList mountConfiguration = default; + BatchPoolIdentity identity = default; + BatchNodeCommunicationMode? targetNodeCommunicationMode = default; + BatchNodeCommunicationMode? currentNodeCommunicationMode = default; + UpgradePolicy upgradePolicy = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("displayName"u8)) + { + displayName = property.Value.GetString(); + continue; + } + if (property.NameEquals("url"u8)) + { + url = property.Value.GetString(); + continue; + } + if (property.NameEquals("eTag"u8)) + { + eTag = property.Value.GetString(); + continue; + } + if (property.NameEquals("lastModified"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + lastModified = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("creationTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + creationTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("state"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + state = new BatchPoolState(property.Value.GetString()); + continue; + } + if (property.NameEquals("stateTransitionTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + stateTransitionTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("allocationState"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + allocationState = new AllocationState(property.Value.GetString()); + continue; + } + if (property.NameEquals("allocationStateTransitionTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + allocationStateTransitionTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("vmSize"u8)) + { + vmSize = property.Value.GetString(); + continue; + } + if (property.NameEquals("virtualMachineConfiguration"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + virtualMachineConfiguration = VirtualMachineConfiguration.DeserializeVirtualMachineConfiguration(property.Value, options); + continue; + } + if (property.NameEquals("resizeTimeout"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + resizeTimeout = 
property.Value.GetTimeSpan("P"); + continue; + } + if (property.NameEquals("resizeErrors"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(ResizeError.DeserializeResizeError(item, options)); + } + resizeErrors = array; + continue; + } + if (property.NameEquals("resourceTags"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetString()); + } + resourceTags = dictionary; + continue; + } + if (property.NameEquals("currentDedicatedNodes"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + currentDedicatedNodes = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("currentLowPriorityNodes"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + currentLowPriorityNodes = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("targetDedicatedNodes"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + targetDedicatedNodes = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("targetLowPriorityNodes"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + targetLowPriorityNodes = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("enableAutoScale"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + enableAutoScale = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("autoScaleFormula"u8)) + { + autoScaleFormula = property.Value.GetString(); + continue; + } + if (property.NameEquals("autoScaleEvaluationInterval"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + autoScaleEvaluationInterval = property.Value.GetTimeSpan("P"); + continue; + } + if (property.NameEquals("autoScaleRun"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + autoScaleRun = AutoScaleRun.DeserializeAutoScaleRun(property.Value, options); + continue; + } + if (property.NameEquals("enableInterNodeCommunication"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + enableInterNodeCommunication = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("networkConfiguration"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + networkConfiguration = NetworkConfiguration.DeserializeNetworkConfiguration(property.Value, options); + continue; + } + if (property.NameEquals("startTask"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + startTask = BatchStartTask.DeserializeBatchStartTask(property.Value, options); + continue; + } + if (property.NameEquals("applicationPackageReferences"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(BatchApplicationPackageReference.DeserializeBatchApplicationPackageReference(item, options)); + } + applicationPackageReferences = array; + continue; + } + if (property.NameEquals("taskSlotsPerNode"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + taskSlotsPerNode = property.Value.GetInt32(); + continue; + } + if 
(property.NameEquals("taskSchedulingPolicy"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + taskSchedulingPolicy = BatchTaskSchedulingPolicy.DeserializeBatchTaskSchedulingPolicy(property.Value, options); + continue; + } + if (property.NameEquals("userAccounts"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(UserAccount.DeserializeUserAccount(item, options)); + } + userAccounts = array; + continue; + } + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(MetadataItem.DeserializeMetadataItem(item, options)); + } + metadata = array; + continue; + } + if (property.NameEquals("stats"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + stats = BatchPoolStatistics.DeserializeBatchPoolStatistics(property.Value, options); + continue; + } + if (property.NameEquals("mountConfiguration"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(Batch.MountConfiguration.DeserializeMountConfiguration(item, options)); + } + mountConfiguration = array; + continue; + } + if (property.NameEquals("identity"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + identity = BatchPoolIdentity.DeserializeBatchPoolIdentity(property.Value, options); + continue; + } + if (property.NameEquals("targetNodeCommunicationMode"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + targetNodeCommunicationMode = new BatchNodeCommunicationMode(property.Value.GetString()); + continue; + } + if (property.NameEquals("currentNodeCommunicationMode"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + currentNodeCommunicationMode = new BatchNodeCommunicationMode(property.Value.GetString()); + continue; + } + if (property.NameEquals("upgradePolicy"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + upgradePolicy = UpgradePolicy.DeserializeUpgradePolicy(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchPool( + id, + displayName, + url, + eTag, + lastModified, + creationTime, + state, + stateTransitionTime, + allocationState, + allocationStateTransitionTime, + vmSize, + virtualMachineConfiguration, + resizeTimeout, + resizeErrors ?? new ChangeTrackingList(), + resourceTags ?? new ChangeTrackingDictionary(), + currentDedicatedNodes, + currentLowPriorityNodes, + targetDedicatedNodes, + targetLowPriorityNodes, + enableAutoScale, + autoScaleFormula, + autoScaleEvaluationInterval, + autoScaleRun, + enableInterNodeCommunication, + networkConfiguration, + startTask, + applicationPackageReferences ?? new ChangeTrackingList(), + taskSlotsPerNode, + taskSchedulingPolicy, + userAccounts ?? new ChangeTrackingList(), + metadata ?? new ChangeTrackingList(), + stats, + mountConfiguration ?? 
new ChangeTrackingList(), + identity, + targetNodeCommunicationMode, + currentNodeCommunicationMode, + upgradePolicy, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchPool)} does not support writing '{options.Format}' format."); + } + } + + BatchPool IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchPool(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchPool)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchPool FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchPool(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPool.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPool.cs new file mode 100644 index 0000000000000..4cbd3bee14783 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPool.cs @@ -0,0 +1,215 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// A Pool in the Azure Batch service. + public partial class BatchPool + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + internal BatchPool() + { + ResizeErrors = new ChangeTrackingList(); + ResourceTags = new ChangeTrackingDictionary(); + ApplicationPackageReferences = new ChangeTrackingList(); + UserAccounts = new ChangeTrackingList(); + Metadata = new ChangeTrackingList(); + MountConfiguration = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// A string that uniquely identifies the Pool within the Account. 
The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). + /// The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. + /// The URL of the Pool. + /// The ETag of the Pool. This is an opaque string. You can use it to detect whether the Pool has changed between requests. In particular, you can pass the ETag when updating a Pool to specify that your changes should take effect only if nobody else has modified the Pool in the meantime. + /// The last modified time of the Pool. This is the last time at which the Pool level data, such as the targetDedicatedNodes or enableAutoscale settings, changed. It does not factor in node-level changes such as a Compute Node changing state. + /// The creation time of the Pool. + /// The current state of the Pool. + /// The time at which the Pool entered its current state. + /// Whether the Pool is resizing. + /// The time at which the Pool entered its current allocation state. + /// The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + /// The virtual machine configuration for the Pool. This property must be specified. + /// The timeout for allocation of Compute Nodes to the Pool. This is the timeout for the most recent resize operation. (The initial sizing when the Pool is created counts as a resize.) The default value is 15 minutes. + /// A list of errors encountered while performing the last resize on the Pool. This property is set only if one or more errors occurred during the last Pool resize, and only when the Pool allocationState is Steady. + /// The user-specified tags associated with the pool. The user-defined tags to be associated with the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources associated with the pool. This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'. + /// The number of dedicated Compute Nodes currently in the Pool. + /// The number of Spot/Low-priority Compute Nodes currently in the Pool. Spot/Low-priority Compute Nodes which have been preempted are included in this count. + /// The desired number of dedicated Compute Nodes in the Pool. + /// The desired number of Spot/Low-priority Compute Nodes in the Pool. + /// Whether the Pool size should automatically adjust over time. If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula property is required and the Pool automatically resizes according to the formula. The default value is false. + /// A formula for the desired number of Compute Nodes in the Pool. This property is set only if the Pool automatically scales, i.e. enableAutoScale is true. + /// The time interval at which to automatically adjust the Pool size according to the autoscale formula. This property is set only if the Pool automatically scales, i.e. enableAutoScale is true. + /// The results and errors from the last execution of the autoscale formula. This property is set only if the Pool automatically scales, i.e.
enableAutoScale is true. + /// Whether the Pool permits direct communication between Compute Nodes. This imposes restrictions on which Compute Nodes can be assigned to the Pool. Specifying this value can reduce the chance of the requested number of Compute Nodes being allocated in the Pool. + /// The network configuration for the Pool. + /// A Task specified to run on each Compute Node as it joins the Pool. + /// The list of Packages to be installed on each Compute Node in the Pool. Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. + /// The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. + /// How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. + /// The list of user Accounts to be created on each Compute Node in the Pool. + /// A list of name-value pairs associated with the Pool as metadata. + /// Utilization and resource usage statistics for the entire lifetime of the Pool. This property is populated only if the CloudPool was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. + /// A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. + /// The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. + /// The desired node communication mode for the pool. If omitted, the default value is Default. + /// The current state of the pool communication mode. + /// The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. + /// Keeps track of any properties unknown to the library. + internal BatchPool(string id, string displayName, string url, string eTag, DateTimeOffset? lastModified, DateTimeOffset? creationTime, BatchPoolState? state, DateTimeOffset? stateTransitionTime, AllocationState? allocationState, DateTimeOffset? allocationStateTransitionTime, string vmSize, VirtualMachineConfiguration virtualMachineConfiguration, TimeSpan? resizeTimeout, IReadOnlyList resizeErrors, IReadOnlyDictionary resourceTags, int? currentDedicatedNodes, int? currentLowPriorityNodes, int? targetDedicatedNodes, int? targetLowPriorityNodes, bool? enableAutoScale, string autoScaleFormula, TimeSpan? autoScaleEvaluationInterval, AutoScaleRun autoScaleRun, bool? enableInterNodeCommunication, NetworkConfiguration networkConfiguration, BatchStartTask startTask, IReadOnlyList applicationPackageReferences, int? taskSlotsPerNode, BatchTaskSchedulingPolicy taskSchedulingPolicy, IReadOnlyList userAccounts, IReadOnlyList metadata, BatchPoolStatistics stats, IReadOnlyList mountConfiguration, BatchPoolIdentity identity, BatchNodeCommunicationMode? targetNodeCommunicationMode, BatchNodeCommunicationMode?
currentNodeCommunicationMode, UpgradePolicy upgradePolicy, IDictionary serializedAdditionalRawData) + { + Id = id; + DisplayName = displayName; + Url = url; + ETag = eTag; + LastModified = lastModified; + CreationTime = creationTime; + State = state; + StateTransitionTime = stateTransitionTime; + AllocationState = allocationState; + AllocationStateTransitionTime = allocationStateTransitionTime; + VmSize = vmSize; + VirtualMachineConfiguration = virtualMachineConfiguration; + ResizeTimeout = resizeTimeout; + ResizeErrors = resizeErrors; + ResourceTags = resourceTags; + CurrentDedicatedNodes = currentDedicatedNodes; + CurrentLowPriorityNodes = currentLowPriorityNodes; + TargetDedicatedNodes = targetDedicatedNodes; + TargetLowPriorityNodes = targetLowPriorityNodes; + EnableAutoScale = enableAutoScale; + AutoScaleFormula = autoScaleFormula; + AutoScaleEvaluationInterval = autoScaleEvaluationInterval; + AutoScaleRun = autoScaleRun; + EnableInterNodeCommunication = enableInterNodeCommunication; + NetworkConfiguration = networkConfiguration; + StartTask = startTask; + ApplicationPackageReferences = applicationPackageReferences; + TaskSlotsPerNode = taskSlotsPerNode; + TaskSchedulingPolicy = taskSchedulingPolicy; + UserAccounts = userAccounts; + Metadata = metadata; + Stats = stats; + MountConfiguration = mountConfiguration; + Identity = identity; + TargetNodeCommunicationMode = targetNodeCommunicationMode; + CurrentNodeCommunicationMode = currentNodeCommunicationMode; + UpgradePolicy = upgradePolicy; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// A string that uniquely identifies the Pool within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). + public string Id { get; } + /// The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. + public string DisplayName { get; } + /// The URL of the Pool. + public string Url { get; } + /// The ETag of the Pool. This is an opaque string. You can use it to detect whether the Pool has changed between requests. In particular, you can pass the ETag when updating a Pool to specify that your changes should take effect only if nobody else has modified the Pool in the meantime. + public string ETag { get; } + /// The last modified time of the Pool. This is the last time at which the Pool level data, such as the targetDedicatedNodes or enableAutoscale settings, changed. It does not factor in node-level changes such as a Compute Node changing state. + public DateTimeOffset? LastModified { get; } + /// The creation time of the Pool. + public DateTimeOffset? CreationTime { get; } + /// The current state of the Pool. + public BatchPoolState? State { get; } + /// The time at which the Pool entered its current state. + public DateTimeOffset? StateTransitionTime { get; } + /// Whether the Pool is resizing. + public AllocationState? AllocationState { get; } + /// The time at which the Pool entered its current allocation state. + public DateTimeOffset? AllocationStateTransitionTime { get; } + /// The size of virtual machines in the Pool. All virtual machines in a Pool are the same size.
For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + public string VmSize { get; } + /// The virtual machine configuration for the Pool. This property must be specified. + public VirtualMachineConfiguration VirtualMachineConfiguration { get; } + /// The timeout for allocation of Compute Nodes to the Pool. This is the timeout for the most recent resize operation. (The initial sizing when the Pool is created counts as a resize.) The default value is 15 minutes. + public TimeSpan? ResizeTimeout { get; } + /// A list of errors encountered while performing the last resize on the Pool. This property is set only if one or more errors occurred during the last Pool resize, and only when the Pool allocationState is Steady. + public IReadOnlyList ResizeErrors { get; } + /// The user-specified tags associated with the pool. The user-defined tags to be associated with the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources associated with the pool. This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'. + public IReadOnlyDictionary ResourceTags { get; } + /// The number of dedicated Compute Nodes currently in the Pool. + public int? CurrentDedicatedNodes { get; } + /// The number of Spot/Low-priority Compute Nodes currently in the Pool. Spot/Low-priority Compute Nodes which have been preempted are included in this count. + public int? CurrentLowPriorityNodes { get; } + /// The desired number of dedicated Compute Nodes in the Pool. + public int? TargetDedicatedNodes { get; } + /// The desired number of Spot/Low-priority Compute Nodes in the Pool. + public int? TargetLowPriorityNodes { get; } + /// Whether the Pool size should automatically adjust over time. If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula property is required and the Pool automatically resizes according to the formula. The default value is false. + public bool? EnableAutoScale { get; } + /// A formula for the desired number of Compute Nodes in the Pool. This property is set only if the Pool automatically scales, i.e. enableAutoScale is true. + public string AutoScaleFormula { get; } + /// The time interval at which to automatically adjust the Pool size according to the autoscale formula. This property is set only if the Pool automatically scales, i.e. enableAutoScale is true. + public TimeSpan? AutoScaleEvaluationInterval { get; } + /// The results and errors from the last execution of the autoscale formula. This property is set only if the Pool automatically scales, i.e. enableAutoScale is true. + public AutoScaleRun AutoScaleRun { get; } + /// Whether the Pool permits direct communication between Compute Nodes. This imposes restrictions on which Compute Nodes can be assigned to the Pool. Specifying this value can reduce the chance of the requested number of Compute Nodes being allocated in the Pool. + public bool? EnableInterNodeCommunication { get; } + /// The network configuration for the Pool. + public NetworkConfiguration NetworkConfiguration { get; } + /// A Task specified to run on each Compute Node as it joins the Pool. + public BatchStartTask StartTask { get; } + /// The list of Packages to be installed on each Compute Node in the Pool.
Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. + public IReadOnlyList ApplicationPackageReferences { get; } + /// The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. + public int? TaskSlotsPerNode { get; } + /// How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. + public BatchTaskSchedulingPolicy TaskSchedulingPolicy { get; } + /// The list of user Accounts to be created on each Compute Node in the Pool. + public IReadOnlyList UserAccounts { get; } + /// A list of name-value pairs associated with the Pool as metadata. + public IReadOnlyList Metadata { get; } + /// Utilization and resource usage statistics for the entire lifetime of the Pool. This property is populated only if the CloudPool was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. + public BatchPoolStatistics Stats { get; } + /// A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. + public IReadOnlyList MountConfiguration { get; } + /// The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. + public BatchPoolIdentity Identity { get; } + /// The desired node communication mode for the pool. If omitted, the default value is Default. + public BatchNodeCommunicationMode? TargetNodeCommunicationMode { get; } + /// The current state of the pool communication mode. + public BatchNodeCommunicationMode? CurrentNodeCommunicationMode { get; } + /// The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. + public UpgradePolicy UpgradePolicy { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateContent.Serialization.cs new file mode 100644 index 0000000000000..6b99fdb0fe630 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateContent.Serialization.cs @@ -0,0 +1,509 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchPoolCreateContent : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchPoolCreateContent)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + if (Optional.IsDefined(DisplayName)) + { + writer.WritePropertyName("displayName"u8); + writer.WriteStringValue(DisplayName); + } + writer.WritePropertyName("vmSize"u8); + writer.WriteStringValue(VmSize); + if (Optional.IsDefined(VirtualMachineConfiguration)) + { + writer.WritePropertyName("virtualMachineConfiguration"u8); + writer.WriteObjectValue(VirtualMachineConfiguration, options); + } + if (Optional.IsDefined(ResizeTimeout)) + { + writer.WritePropertyName("resizeTimeout"u8); + writer.WriteStringValue(ResizeTimeout.Value, "P"); + } + if (Optional.IsCollectionDefined(ResourceTags)) + { + writer.WritePropertyName("resourceTags"u8); + writer.WriteStartObject(); + foreach (var item in ResourceTags) + { + writer.WritePropertyName(item.Key); + writer.WriteStringValue(item.Value); + } + writer.WriteEndObject(); + } + if (Optional.IsDefined(TargetDedicatedNodes)) + { + writer.WritePropertyName("targetDedicatedNodes"u8); + writer.WriteNumberValue(TargetDedicatedNodes.Value); + } + if (Optional.IsDefined(TargetLowPriorityNodes)) + { + writer.WritePropertyName("targetLowPriorityNodes"u8); + writer.WriteNumberValue(TargetLowPriorityNodes.Value); + } + if (Optional.IsDefined(EnableAutoScale)) + { + writer.WritePropertyName("enableAutoScale"u8); + writer.WriteBooleanValue(EnableAutoScale.Value); + } + if (Optional.IsDefined(AutoScaleFormula)) + { + writer.WritePropertyName("autoScaleFormula"u8); + writer.WriteStringValue(AutoScaleFormula); + } + if (Optional.IsDefined(AutoScaleEvaluationInterval)) + { + writer.WritePropertyName("autoScaleEvaluationInterval"u8); + writer.WriteStringValue(AutoScaleEvaluationInterval.Value, "P"); + } + if (Optional.IsDefined(EnableInterNodeCommunication)) + { + writer.WritePropertyName("enableInterNodeCommunication"u8); + writer.WriteBooleanValue(EnableInterNodeCommunication.Value); + } + if (Optional.IsDefined(NetworkConfiguration)) + { + writer.WritePropertyName("networkConfiguration"u8); + writer.WriteObjectValue(NetworkConfiguration, options); + } + if (Optional.IsDefined(StartTask)) + { + writer.WritePropertyName("startTask"u8); + writer.WriteObjectValue(StartTask, options); + } + if (Optional.IsCollectionDefined(ApplicationPackageReferences)) + { + writer.WritePropertyName("applicationPackageReferences"u8); + writer.WriteStartArray(); + foreach (var item in ApplicationPackageReferences) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (Optional.IsDefined(TaskSlotsPerNode)) + { + writer.WritePropertyName("taskSlotsPerNode"u8); + writer.WriteNumberValue(TaskSlotsPerNode.Value); + } + if (Optional.IsDefined(TaskSchedulingPolicy)) + { + writer.WritePropertyName("taskSchedulingPolicy"u8); + writer.WriteObjectValue(TaskSchedulingPolicy, options); + } + if (Optional.IsCollectionDefined(UserAccounts)) + { + writer.WritePropertyName("userAccounts"u8); + writer.WriteStartArray(); + foreach (var item in UserAccounts) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (Optional.IsCollectionDefined(Metadata)) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartArray(); + foreach (var item in Metadata) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); 
+ } + if (Optional.IsCollectionDefined(MountConfiguration)) + { + writer.WritePropertyName("mountConfiguration"u8); + writer.WriteStartArray(); + foreach (var item in MountConfiguration) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (Optional.IsDefined(TargetNodeCommunicationMode)) + { + writer.WritePropertyName("targetNodeCommunicationMode"u8); + writer.WriteStringValue(TargetNodeCommunicationMode.Value.ToString()); + } + if (Optional.IsDefined(UpgradePolicy)) + { + writer.WritePropertyName("upgradePolicy"u8); + writer.WriteObjectValue(UpgradePolicy, options); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchPoolCreateContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchPoolCreateContent)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchPoolCreateContent(document.RootElement, options); + } + + internal static BatchPoolCreateContent DeserializeBatchPoolCreateContent(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + string displayName = default; + string vmSize = default; + VirtualMachineConfiguration virtualMachineConfiguration = default; + TimeSpan? resizeTimeout = default; + IDictionary resourceTags = default; + int? targetDedicatedNodes = default; + int? targetLowPriorityNodes = default; + bool? enableAutoScale = default; + string autoScaleFormula = default; + TimeSpan? autoScaleEvaluationInterval = default; + bool? enableInterNodeCommunication = default; + NetworkConfiguration networkConfiguration = default; + BatchStartTask startTask = default; + IList applicationPackageReferences = default; + int? taskSlotsPerNode = default; + BatchTaskSchedulingPolicy taskSchedulingPolicy = default; + IList userAccounts = default; + IList metadata = default; + IList mountConfiguration = default; + BatchNodeCommunicationMode? 
targetNodeCommunicationMode = default; + UpgradePolicy upgradePolicy = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("displayName"u8)) + { + displayName = property.Value.GetString(); + continue; + } + if (property.NameEquals("vmSize"u8)) + { + vmSize = property.Value.GetString(); + continue; + } + if (property.NameEquals("virtualMachineConfiguration"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + virtualMachineConfiguration = VirtualMachineConfiguration.DeserializeVirtualMachineConfiguration(property.Value, options); + continue; + } + if (property.NameEquals("resizeTimeout"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + resizeTimeout = property.Value.GetTimeSpan("P"); + continue; + } + if (property.NameEquals("resourceTags"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetString()); + } + resourceTags = dictionary; + continue; + } + if (property.NameEquals("targetDedicatedNodes"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + targetDedicatedNodes = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("targetLowPriorityNodes"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + targetLowPriorityNodes = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("enableAutoScale"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + enableAutoScale = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("autoScaleFormula"u8)) + { + autoScaleFormula = property.Value.GetString(); + continue; + } + if (property.NameEquals("autoScaleEvaluationInterval"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + autoScaleEvaluationInterval = property.Value.GetTimeSpan("P"); + continue; + } + if (property.NameEquals("enableInterNodeCommunication"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + enableInterNodeCommunication = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("networkConfiguration"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + networkConfiguration = NetworkConfiguration.DeserializeNetworkConfiguration(property.Value, options); + continue; + } + if (property.NameEquals("startTask"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + startTask = BatchStartTask.DeserializeBatchStartTask(property.Value, options); + continue; + } + if (property.NameEquals("applicationPackageReferences"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(BatchApplicationPackageReference.DeserializeBatchApplicationPackageReference(item, options)); + } + applicationPackageReferences = array; + continue; + } + if (property.NameEquals("taskSlotsPerNode"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + taskSlotsPerNode = property.Value.GetInt32(); + continue; + } + 
if (property.NameEquals("taskSchedulingPolicy"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + taskSchedulingPolicy = BatchTaskSchedulingPolicy.DeserializeBatchTaskSchedulingPolicy(property.Value, options); + continue; + } + if (property.NameEquals("userAccounts"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(UserAccount.DeserializeUserAccount(item, options)); + } + userAccounts = array; + continue; + } + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(MetadataItem.DeserializeMetadataItem(item, options)); + } + metadata = array; + continue; + } + if (property.NameEquals("mountConfiguration"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(Batch.MountConfiguration.DeserializeMountConfiguration(item, options)); + } + mountConfiguration = array; + continue; + } + if (property.NameEquals("targetNodeCommunicationMode"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + targetNodeCommunicationMode = new BatchNodeCommunicationMode(property.Value.GetString()); + continue; + } + if (property.NameEquals("upgradePolicy"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + upgradePolicy = UpgradePolicy.DeserializeUpgradePolicy(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchPoolCreateContent( + id, + displayName, + vmSize, + virtualMachineConfiguration, + resizeTimeout, + resourceTags ?? new ChangeTrackingDictionary(), + targetDedicatedNodes, + targetLowPriorityNodes, + enableAutoScale, + autoScaleFormula, + autoScaleEvaluationInterval, + enableInterNodeCommunication, + networkConfiguration, + startTask, + applicationPackageReferences ?? new ChangeTrackingList(), + taskSlotsPerNode, + taskSchedulingPolicy, + userAccounts ?? new ChangeTrackingList(), + metadata ?? new ChangeTrackingList(), + mountConfiguration ?? new ChangeTrackingList(), + targetNodeCommunicationMode, + upgradePolicy, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchPoolCreateContent)} does not support writing '{options.Format}' format."); + } + } + + BatchPoolCreateContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchPoolCreateContent(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchPoolCreateContent)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchPoolCreateContent FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchPoolCreateContent(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateContent.cs new file mode 100644 index 0000000000000..78653a2849d33 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateContent.cs @@ -0,0 +1,167 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Parameters for creating an Azure Batch Pool. + public partial class BatchPoolCreateContent + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// A string that uniquely identifies the Pool within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two Pool IDs within an Account that differ only by case). + /// The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available VM sizes for Pools using Images from the Virtual Machines Marketplace (pools created with virtualMachineConfiguration), see Sizes for Virtual Machines (Linux) (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) or Sizes for Virtual Machines (Windows) (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). + /// or is null. 
+ public BatchPoolCreateContent(string id, string vmSize) + { + Argument.AssertNotNull(id, nameof(id)); + Argument.AssertNotNull(vmSize, nameof(vmSize)); + + Id = id; + VmSize = vmSize; + ResourceTags = new ChangeTrackingDictionary(); + ApplicationPackageReferences = new ChangeTrackingList(); + UserAccounts = new ChangeTrackingList(); + Metadata = new ChangeTrackingList(); + MountConfiguration = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// A string that uniquely identifies the Pool within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two Pool IDs within an Account that differ only by case). + /// The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. + /// The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available VM sizes for Pools using Images from the Virtual Machines Marketplace (pools created with virtualMachineConfiguration), see Sizes for Virtual Machines (Linux) (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) or Sizes for Virtual Machines (Windows) (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). + /// The virtual machine configuration for the Pool. This property must be specified. + /// The timeout for allocation of Compute Nodes to the Pool. This timeout applies only to manual scaling; it has no effect when enableAutoScale is set to true. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + /// The user-specified tags associated with the pool. The user-defined tags to be associated with the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources associated with the pool. This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'. + /// The desired number of dedicated Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. + /// The desired number of Spot/Low-priority Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. + /// Whether the Pool size should automatically adjust over time. If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula property is required and the Pool automatically resizes according to the formula. The default value is false. + /// A formula for the desired number of Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to false. It is required if enableAutoScale is set to true. The formula is checked for validity before the Pool is created. 
If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see 'Automatically scale Compute Nodes in an Azure Batch Pool' (https://azure.microsoft.com/documentation/articles/batch-automatic-scaling/). + /// The time interval at which to automatically adjust the Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + /// Whether the Pool permits direct communication between Compute Nodes. Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false. + /// The network configuration for the Pool. + /// A Task specified to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. + /// The list of Packages to be installed on each Compute Node in the Pool. When creating a pool, the package's application ID must be fully qualified (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. + /// The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. + /// How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. + /// The list of user Accounts to be created on each Compute Node in the Pool. + /// A list of name-value pairs associated with the Pool as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. + /// Mount storage using specified file system for the entire lifetime of the pool. Mount the storage using Azure fileshare, NFS, CIFS or Blobfuse based file system. + /// The desired node communication mode for the pool. If omitted, the default value is Default. + /// The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. + /// Keeps track of any properties unknown to the library. + internal BatchPoolCreateContent(string id, string displayName, string vmSize, VirtualMachineConfiguration virtualMachineConfiguration, TimeSpan? resizeTimeout, IDictionary resourceTags, int? targetDedicatedNodes, int? targetLowPriorityNodes, bool? enableAutoScale, string autoScaleFormula, TimeSpan? autoScaleEvaluationInterval, bool? enableInterNodeCommunication, NetworkConfiguration networkConfiguration, BatchStartTask startTask, IList applicationPackageReferences, int? taskSlotsPerNode, BatchTaskSchedulingPolicy taskSchedulingPolicy, IList userAccounts, IList metadata, IList mountConfiguration, BatchNodeCommunicationMode? 
targetNodeCommunicationMode, UpgradePolicy upgradePolicy, IDictionary serializedAdditionalRawData) + { + Id = id; + DisplayName = displayName; + VmSize = vmSize; + VirtualMachineConfiguration = virtualMachineConfiguration; + ResizeTimeout = resizeTimeout; + ResourceTags = resourceTags; + TargetDedicatedNodes = targetDedicatedNodes; + TargetLowPriorityNodes = targetLowPriorityNodes; + EnableAutoScale = enableAutoScale; + AutoScaleFormula = autoScaleFormula; + AutoScaleEvaluationInterval = autoScaleEvaluationInterval; + EnableInterNodeCommunication = enableInterNodeCommunication; + NetworkConfiguration = networkConfiguration; + StartTask = startTask; + ApplicationPackageReferences = applicationPackageReferences; + TaskSlotsPerNode = taskSlotsPerNode; + TaskSchedulingPolicy = taskSchedulingPolicy; + UserAccounts = userAccounts; + Metadata = metadata; + MountConfiguration = mountConfiguration; + TargetNodeCommunicationMode = targetNodeCommunicationMode; + UpgradePolicy = upgradePolicy; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchPoolCreateContent() + { + } + + /// A string that uniquely identifies the Pool within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two Pool IDs within an Account that differ only by case). + public string Id { get; } + /// The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. + public string DisplayName { get; set; } + /// The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available VM sizes for Pools using Images from the Virtual Machines Marketplace (pools created with virtualMachineConfiguration), see Sizes for Virtual Machines (Linux) (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) or Sizes for Virtual Machines (Windows) (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). + public string VmSize { get; } + /// The virtual machine configuration for the Pool. This property must be specified. + public VirtualMachineConfiguration VirtualMachineConfiguration { get; set; } + /// The timeout for allocation of Compute Nodes to the Pool. This timeout applies only to manual scaling; it has no effect when enableAutoScale is set to true. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + public TimeSpan? ResizeTimeout { get; set; } + /// The user-specified tags associated with the pool. The user-defined tags to be associated with the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources associated with the pool. This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'. + public IDictionary ResourceTags { get; } + /// The desired number of dedicated Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. 
If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. + public int? TargetDedicatedNodes { get; set; } + /// The desired number of Spot/Low-priority Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. + public int? TargetLowPriorityNodes { get; set; } + /// Whether the Pool size should automatically adjust over time. If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula property is required and the Pool automatically resizes according to the formula. The default value is false. + public bool? EnableAutoScale { get; set; } + /// A formula for the desired number of Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to false. It is required if enableAutoScale is set to true. The formula is checked for validity before the Pool is created. If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see 'Automatically scale Compute Nodes in an Azure Batch Pool' (https://azure.microsoft.com/documentation/articles/batch-automatic-scaling/). + public string AutoScaleFormula { get; set; } + /// The time interval at which to automatically adjust the Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + public TimeSpan? AutoScaleEvaluationInterval { get; set; } + /// Whether the Pool permits direct communication between Compute Nodes. Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false. + public bool? EnableInterNodeCommunication { get; set; } + /// The network configuration for the Pool. + public NetworkConfiguration NetworkConfiguration { get; set; } + /// A Task specified to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. + public BatchStartTask StartTask { get; set; } + /// The list of Packages to be installed on each Compute Node in the Pool. When creating a pool, the package's application ID must be fully qualified (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. + public IList ApplicationPackageReferences { get; } + /// The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. + public int? TaskSlotsPerNode { get; set; } + /// How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. 
+ public BatchTaskSchedulingPolicy TaskSchedulingPolicy { get; set; } + /// The list of user Accounts to be created on each Compute Node in the Pool. + public IList UserAccounts { get; } + /// A list of name-value pairs associated with the Pool as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. + public IList Metadata { get; } + /// Mount storage using specified file system for the entire lifetime of the pool. Mount the storage using Azure fileshare, NFS, CIFS or Blobfuse based file system. + public IList MountConfiguration { get; } + /// The desired node communication mode for the pool. If omitted, the default value is Default. + public BatchNodeCommunicationMode? TargetNodeCommunicationMode { get; set; } + /// The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. + public UpgradePolicy UpgradePolicy { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEnableAutoScaleContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEnableAutoScaleContent.Serialization.cs new file mode 100644 index 0000000000000..f4616dd380466 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEnableAutoScaleContent.Serialization.cs @@ -0,0 +1,153 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchPoolEnableAutoScaleContent : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchPoolEnableAutoScaleContent)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(AutoScaleFormula)) + { + writer.WritePropertyName("autoScaleFormula"u8); + writer.WriteStringValue(AutoScaleFormula); + } + if (Optional.IsDefined(AutoScaleEvaluationInterval)) + { + writer.WritePropertyName("autoScaleEvaluationInterval"u8); + writer.WriteStringValue(AutoScaleEvaluationInterval.Value, "P"); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchPoolEnableAutoScaleContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchPoolEnableAutoScaleContent)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchPoolEnableAutoScaleContent(document.RootElement, options); + } + + internal static BatchPoolEnableAutoScaleContent DeserializeBatchPoolEnableAutoScaleContent(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string autoScaleFormula = default; + TimeSpan? autoScaleEvaluationInterval = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("autoScaleFormula"u8)) + { + autoScaleFormula = property.Value.GetString(); + continue; + } + if (property.NameEquals("autoScaleEvaluationInterval"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + autoScaleEvaluationInterval = property.Value.GetTimeSpan("P"); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchPoolEnableAutoScaleContent(autoScaleFormula, autoScaleEvaluationInterval, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchPoolEnableAutoScaleContent)} does not support writing '{options.Format}' format."); + } + } + + BatchPoolEnableAutoScaleContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchPoolEnableAutoScaleContent(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchPoolEnableAutoScaleContent)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchPoolEnableAutoScaleContent FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchPoolEnableAutoScaleContent(document.RootElement); + } + + /// Convert into a . 
+ internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEnableAutoScaleContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEnableAutoScaleContent.cs new file mode 100644 index 0000000000000..13446bd47d3c2 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEnableAutoScaleContent.cs @@ -0,0 +1,69 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Parameters for enabling automatic scaling on an Azure Batch Pool. + public partial class BatchPoolEnableAutoScaleContent + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public BatchPoolEnableAutoScaleContent() + { + } + + /// Initializes a new instance of . + /// The formula for the desired number of Compute Nodes in the Pool. The formula is checked for validity before it is applied to the Pool. If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). + /// The time interval at which to automatically adjust the Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service rejects the request with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). If you specify a new interval, then the existing autoscale evaluation schedule will be stopped and a new autoscale evaluation schedule will be started, with its starting time being the time when this request was issued. + /// Keeps track of any properties unknown to the library. + internal BatchPoolEnableAutoScaleContent(string autoScaleFormula, TimeSpan? autoScaleEvaluationInterval, IDictionary serializedAdditionalRawData) + { + AutoScaleFormula = autoScaleFormula; + AutoScaleEvaluationInterval = autoScaleEvaluationInterval; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The formula for the desired number of Compute Nodes in the Pool. The formula is checked for validity before it is applied to the Pool. If the formula is not valid, the Batch service rejects the request with detailed error information. 
For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). + public string AutoScaleFormula { get; set; } + /// The time interval at which to automatically adjust the Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service rejects the request with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). If you specify a new interval, then the existing autoscale evaluation schedule will be stopped and a new autoscale evaluation schedule will be started, with its starting time being the time when this request was issued. + public TimeSpan? AutoScaleEvaluationInterval { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEndpointConfiguration.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEndpointConfiguration.Serialization.cs new file mode 100644 index 0000000000000..ad9474bedb9e9 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEndpointConfiguration.Serialization.cs @@ -0,0 +1,145 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchPoolEndpointConfiguration : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchPoolEndpointConfiguration)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("inboundNATPools"u8); + writer.WriteStartArray(); + foreach (var item in InboundNatPools) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchPoolEndpointConfiguration IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchPoolEndpointConfiguration)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchPoolEndpointConfiguration(document.RootElement, options); + } + + internal static BatchPoolEndpointConfiguration DeserializeBatchPoolEndpointConfiguration(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + IList inboundNATPools = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("inboundNATPools"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(InboundNatPool.DeserializeInboundNatPool(item, options)); + } + inboundNATPools = array; + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchPoolEndpointConfiguration(inboundNATPools, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchPoolEndpointConfiguration)} does not support writing '{options.Format}' format."); + } + } + + BatchPoolEndpointConfiguration IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchPoolEndpointConfiguration(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchPoolEndpointConfiguration)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchPoolEndpointConfiguration FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchPoolEndpointConfiguration(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEndpointConfiguration.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEndpointConfiguration.cs new file mode 100644 index 0000000000000..0c8173fbeed5c --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEndpointConfiguration.cs @@ -0,0 +1,76 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
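`BatchPoolEnableAutoScaleContent`, closed out just above, is the request body for switching an existing pool to formula-driven sizing; both of its properties are optional because the service keeps the current value for anything omitted. A sketch under the assumption that the client exposes `EnablePoolAutoScale`/`EvaluatePoolAutoScale` convenience methods for these payloads (`client` is the `BatchClient` from the earlier sketch; the formulas are illustrative):

```csharp
using System;
using Azure.Compute.Batch;

var enable = new BatchPoolEnableAutoScaleContent
{
    AutoScaleFormula = "$TargetDedicatedNodes = 2;",
    // Per the model docs: minimum 5 minutes, maximum 168 hours. Specifying a
    // new interval restarts the evaluation schedule from the request time.
    AutoScaleEvaluationInterval = TimeSpan.FromMinutes(10),
};
client.EnablePoolAutoScale("demo-pool", enable);

// BatchPoolEvaluateAutoScaleContent (defined a little further down in this
// diff) dry-runs a formula against the pool without applying it.
var run = client.EvaluatePoolAutoScale(
    "demo-pool",
    new BatchPoolEvaluateAutoScaleContent("$TargetDedicatedNodes = 0;"));
```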
+ +// + +#nullable disable + +using System; +using System.Collections.Generic; +using System.Linq; + +namespace Azure.Compute.Batch +{ + /// The endpoint configuration for a Pool. + public partial class BatchPoolEndpointConfiguration + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// A list of inbound NAT Pools that can be used to address specific ports on an individual Compute Node externally. The maximum number of inbound NAT Pools per Batch Pool is 5. If the maximum number of inbound NAT Pools is exceeded the request fails with HTTP status code 400. This cannot be specified if the IPAddressProvisioningType is NoPublicIPAddresses. + /// is null. + public BatchPoolEndpointConfiguration(IEnumerable inboundNatPools) + { + Argument.AssertNotNull(inboundNatPools, nameof(inboundNatPools)); + + InboundNatPools = inboundNatPools.ToList(); + } + + /// Initializes a new instance of . + /// A list of inbound NAT Pools that can be used to address specific ports on an individual Compute Node externally. The maximum number of inbound NAT Pools per Batch Pool is 5. If the maximum number of inbound NAT Pools is exceeded the request fails with HTTP status code 400. This cannot be specified if the IPAddressProvisioningType is NoPublicIPAddresses. + /// Keeps track of any properties unknown to the library. + internal BatchPoolEndpointConfiguration(IList inboundNatPools, IDictionary serializedAdditionalRawData) + { + InboundNatPools = inboundNatPools; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchPoolEndpointConfiguration() + { + } + + /// A list of inbound NAT Pools that can be used to address specific ports on an individual Compute Node externally. The maximum number of inbound NAT Pools per Batch Pool is 5. If the maximum number of inbound NAT Pools is exceeded the request fails with HTTP status code 400. This cannot be specified if the IPAddressProvisioningType is NoPublicIPAddresses. + public IList InboundNatPools { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEvaluateAutoScaleContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEvaluateAutoScaleContent.Serialization.cs new file mode 100644 index 0000000000000..7f9ec19c737ee --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEvaluateAutoScaleContent.Serialization.cs @@ -0,0 +1,135 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
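Every model in this PR carries the same `IJsonModel`/`IPersistableModel` plumbing seen above: `GetFormatFromOptions` pins the wire format to JSON (`"J"`), and unknown service properties are captured in `_serializedAdditionalRawData` so they survive a round trip. That makes the models usable directly with `System.ClientModel`'s `ModelReaderWriter`, independent of any client call. A small sketch (property values are arbitrary):

```csharp
using System;
using System.ClientModel.Primitives;
using Azure.Compute.Batch;

var content = new BatchPoolEnableAutoScaleContent
{
    AutoScaleFormula = "$TargetDedicatedNodes = 3;",
    AutoScaleEvaluationInterval = TimeSpan.FromMinutes(30),
};

// "W" asks the model for its wire format; these models answer "J" (JSON).
var options = new ModelReaderWriterOptions("W");
BinaryData json = ModelReaderWriter.Write(content, options);
// {"autoScaleFormula":"$TargetDedicatedNodes = 3;","autoScaleEvaluationInterval":"PT30M"}

// Read it back; properties this library does not know about would be
// preserved in the raw-data dictionary rather than dropped.
BatchPoolEnableAutoScaleContent roundTripped =
    ModelReaderWriter.Read<BatchPoolEnableAutoScaleContent>(json, options);
```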
+ +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchPoolEvaluateAutoScaleContent : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchPoolEvaluateAutoScaleContent)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("autoScaleFormula"u8); + writer.WriteStringValue(AutoScaleFormula); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchPoolEvaluateAutoScaleContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchPoolEvaluateAutoScaleContent)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchPoolEvaluateAutoScaleContent(document.RootElement, options); + } + + internal static BatchPoolEvaluateAutoScaleContent DeserializeBatchPoolEvaluateAutoScaleContent(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string autoScaleFormula = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("autoScaleFormula"u8)) + { + autoScaleFormula = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchPoolEvaluateAutoScaleContent(autoScaleFormula, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchPoolEvaluateAutoScaleContent)} does not support writing '{options.Format}' format."); + } + } + + BatchPoolEvaluateAutoScaleContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchPoolEvaluateAutoScaleContent(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchPoolEvaluateAutoScaleContent)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchPoolEvaluateAutoScaleContent FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchPoolEvaluateAutoScaleContent(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEvaluateAutoScaleContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEvaluateAutoScaleContent.cs new file mode 100644 index 0000000000000..b89f8e7df1cdc --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEvaluateAutoScaleContent.cs @@ -0,0 +1,75 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Parameters for evaluating an automatic scaling formula on an Azure Batch Pool. + public partial class BatchPoolEvaluateAutoScaleContent + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The formula for the desired number of Compute Nodes in the Pool. The formula is validated and its results calculated, but it is not applied to the Pool. To apply the formula to the Pool, 'Enable automatic scaling on a Pool'. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). + /// is null. + public BatchPoolEvaluateAutoScaleContent(string autoScaleFormula) + { + Argument.AssertNotNull(autoScaleFormula, nameof(autoScaleFormula)); + + AutoScaleFormula = autoScaleFormula; + } + + /// Initializes a new instance of . + /// The formula for the desired number of Compute Nodes in the Pool. The formula is validated and its results calculated, but it is not applied to the Pool. To apply the formula to the Pool, 'Enable automatic scaling on a Pool'. 
For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). + /// Keeps track of any properties unknown to the library. + internal BatchPoolEvaluateAutoScaleContent(string autoScaleFormula, IDictionary serializedAdditionalRawData) + { + AutoScaleFormula = autoScaleFormula; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchPoolEvaluateAutoScaleContent() + { + } + + /// The formula for the desired number of Compute Nodes in the Pool. The formula is validated and its results calculated, but it is not applied to the Pool. To apply the formula to the Pool, 'Enable automatic scaling on a Pool'. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://azure.microsoft.com/en-us/documentation/articles/batch-automatic-scaling). + public string AutoScaleFormula { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolIdentity.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolIdentity.Serialization.cs new file mode 100644 index 0000000000000..67f75f8105ea1 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolIdentity.Serialization.cs @@ -0,0 +1,160 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchPoolIdentity : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchPoolIdentity)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type.ToString()); + if (Optional.IsCollectionDefined(UserAssignedIdentities)) + { + writer.WritePropertyName("userAssignedIdentities"u8); + writer.WriteStartArray(); + foreach (var item in UserAssignedIdentities) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchPoolIdentity IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchPoolIdentity)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchPoolIdentity(document.RootElement, options); + } + + internal static BatchPoolIdentity DeserializeBatchPoolIdentity(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + BatchPoolIdentityType type = default; + IReadOnlyList userAssignedIdentities = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("type"u8)) + { + type = new BatchPoolIdentityType(property.Value.GetString()); + continue; + } + if (property.NameEquals("userAssignedIdentities"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(UserAssignedIdentity.DeserializeUserAssignedIdentity(item, options)); + } + userAssignedIdentities = array; + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchPoolIdentity(type, userAssignedIdentities ?? new ChangeTrackingList(), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchPoolIdentity)} does not support writing '{options.Format}' format."); + } + } + + BatchPoolIdentity IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchPoolIdentity(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchPoolIdentity)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchPoolIdentity FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchPoolIdentity(document.RootElement); + } + + /// Convert into a . 
+ internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolIdentity.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolIdentity.cs new file mode 100644 index 0000000000000..4cf680eb1c409 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolIdentity.cs @@ -0,0 +1,77 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// The identity of the Batch pool, if configured. + public partial class BatchPoolIdentity + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. + internal BatchPoolIdentity(BatchPoolIdentityType type) + { + Type = type; + UserAssignedIdentities = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. + /// The list of user identities associated with the Batch account. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. + /// Keeps track of any properties unknown to the library. + internal BatchPoolIdentity(BatchPoolIdentityType type, IReadOnlyList userAssignedIdentities, IDictionary serializedAdditionalRawData) + { + Type = type; + UserAssignedIdentities = userAssignedIdentities; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchPoolIdentity() + { + } + + /// The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. 
+        public BatchPoolIdentityType Type { get; }
+        /// <summary> The list of user identities associated with the Batch account. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. </summary>
+        public IReadOnlyList<UserAssignedIdentity> UserAssignedIdentities { get; }
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolIdentityType.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolIdentityType.cs
new file mode 100644
index 0000000000000..0af2bf9d596f7
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolIdentityType.cs
@@ -0,0 +1,51 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.ComponentModel;
+
+namespace Azure.Compute.Batch
+{
+    /// <summary> BatchPoolIdentityType enums. </summary>
+    public readonly partial struct BatchPoolIdentityType : IEquatable<BatchPoolIdentityType>
+    {
+        private readonly string _value;
+
+        /// <summary> Initializes a new instance of <see cref="BatchPoolIdentityType"/>. </summary>
+        /// <exception cref="ArgumentNullException"> <paramref name="value"/> is null. </exception>
+        public BatchPoolIdentityType(string value)
+        {
+            _value = value ?? throw new ArgumentNullException(nameof(value));
+        }
+
+        private const string UserAssignedValue = "UserAssigned";
+        private const string NoneValue = "None";
+
+        /// <summary> Batch pool has user assigned identities with it. </summary>
+        public static BatchPoolIdentityType UserAssigned { get; } = new BatchPoolIdentityType(UserAssignedValue);
+        /// <summary> Batch pool has no identity associated with it. Setting `None` in update pool will remove existing identities. </summary>
+        public static BatchPoolIdentityType None { get; } = new BatchPoolIdentityType(NoneValue);
+        /// <summary> Determines if two <see cref="BatchPoolIdentityType"/> values are the same. </summary>
+        public static bool operator ==(BatchPoolIdentityType left, BatchPoolIdentityType right) => left.Equals(right);
+        /// <summary> Determines if two <see cref="BatchPoolIdentityType"/> values are not the same. </summary>
+        public static bool operator !=(BatchPoolIdentityType left, BatchPoolIdentityType right) => !left.Equals(right);
+        /// <summary> Converts a string to a <see cref="BatchPoolIdentityType"/>. </summary>
+        public static implicit operator BatchPoolIdentityType(string value) => new BatchPoolIdentityType(value);
+
+        /// <inheritdoc />
+        [EditorBrowsable(EditorBrowsableState.Never)]
+        public override bool Equals(object obj) => obj is BatchPoolIdentityType other && Equals(other);
+        /// <inheritdoc />
+        public bool Equals(BatchPoolIdentityType other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase);
+
+        /// <inheritdoc />
+        [EditorBrowsable(EditorBrowsableState.Never)]
+        public override int GetHashCode() => _value?.GetHashCode() ?? 0;
+        /// <inheritdoc />
+        public override string ToString() => _value;
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolInfo.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolInfo.Serialization.cs
new file mode 100644
index 0000000000000..dd367f46c8d33
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolInfo.Serialization.cs
@@ -0,0 +1,153 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
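`BatchPoolIdentityType` above is this SDK's extensible-enum pattern: a readonly struct wrapping a string, with case-insensitive equality and an implicit conversion from string, so values the client library has never heard of still parse and round-trip. An illustration grounded in the struct's own members:

```csharp
using System;
using Azure.Compute.Batch;

// Implicit conversion from string; comparison is case-insensitive
// (StringComparison.InvariantCultureIgnoreCase in Equals).
BatchPoolIdentityType fromWire = "userassigned";
Console.WriteLine(fromWire == BatchPoolIdentityType.UserAssigned); // True

// A value added service-side after this SDK shipped still works here:
// it stays a string, and ToString() echoes it back unchanged.
var unknown = new BatchPoolIdentityType("SomeFutureIdentityType");
Console.WriteLine(unknown.ToString()); // SomeFutureIdentityType
```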
+ +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchPoolInfo : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchPoolInfo)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(PoolId)) + { + writer.WritePropertyName("poolId"u8); + writer.WriteStringValue(PoolId); + } + if (Optional.IsDefined(AutoPoolSpecification)) + { + writer.WritePropertyName("autoPoolSpecification"u8); + writer.WriteObjectValue(AutoPoolSpecification, options); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchPoolInfo IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchPoolInfo)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchPoolInfo(document.RootElement, options); + } + + internal static BatchPoolInfo DeserializeBatchPoolInfo(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string poolId = default; + BatchAutoPoolSpecification autoPoolSpecification = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("poolId"u8)) + { + poolId = property.Value.GetString(); + continue; + } + if (property.NameEquals("autoPoolSpecification"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + autoPoolSpecification = BatchAutoPoolSpecification.DeserializeBatchAutoPoolSpecification(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchPoolInfo(poolId, autoPoolSpecification, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchPoolInfo)} does not support writing '{options.Format}' format."); + } + } + + BatchPoolInfo IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchPoolInfo(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchPoolInfo)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchPoolInfo FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchPoolInfo(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolInfo.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolInfo.cs new file mode 100644 index 0000000000000..512e1f978ab1e --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolInfo.cs @@ -0,0 +1,69 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Specifies how a Job should be assigned to a Pool. + public partial class BatchPoolInfo + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public BatchPoolInfo() + { + } + + /// Initializes a new instance of . + /// The ID of an existing Pool. All the Tasks of the Job will run on the specified Pool. You must ensure that the Pool referenced by this property exists. If the Pool does not exist at the time the Batch service tries to schedule a Job, no Tasks for the Job will run until you create a Pool with that id. Note that the Batch service will not reject the Job request; it will simply not run Tasks until the Pool exists. You must specify either the Pool ID or the auto Pool specification, but not both. + /// Characteristics for a temporary 'auto pool'. The Batch service will create this auto Pool when the Job is submitted. 
If auto Pool creation fails, the Batch service moves the Job to a completed state, and the Pool creation error is set in the Job's scheduling error property. The Batch service manages the lifetime (both creation and, unless keepAlive is specified, deletion) of the auto Pool. Any user actions that affect the lifetime of the auto Pool while the Job is active will result in unexpected behavior. You must specify either the Pool ID or the auto Pool specification, but not both. + /// Keeps track of any properties unknown to the library. + internal BatchPoolInfo(string poolId, BatchAutoPoolSpecification autoPoolSpecification, IDictionary serializedAdditionalRawData) + { + PoolId = poolId; + AutoPoolSpecification = autoPoolSpecification; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The ID of an existing Pool. All the Tasks of the Job will run on the specified Pool. You must ensure that the Pool referenced by this property exists. If the Pool does not exist at the time the Batch service tries to schedule a Job, no Tasks for the Job will run until you create a Pool with that id. Note that the Batch service will not reject the Job request; it will simply not run Tasks until the Pool exists. You must specify either the Pool ID or the auto Pool specification, but not both. + public string PoolId { get; set; } + /// Characteristics for a temporary 'auto pool'. The Batch service will create this auto Pool when the Job is submitted. If auto Pool creation fails, the Batch service moves the Job to a completed state, and the Pool creation error is set in the Job's scheduling error property. The Batch service manages the lifetime (both creation and, unless keepAlive is specified, deletion) of the auto Pool. Any user actions that affect the lifetime of the auto Pool while the Job is active will result in unexpected behavior. You must specify either the Pool ID or the auto Pool specification, but not both. + public BatchAutoPoolSpecification AutoPoolSpecification { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolLifetimeOption.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolLifetimeOption.cs new file mode 100644 index 0000000000000..2e8e96c1c019e --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolLifetimeOption.cs @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// BatchPoolLifetimeOption enums. + public readonly partial struct BatchPoolLifetimeOption : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public BatchPoolLifetimeOption(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string JobScheduleValue = "jobschedule"; + private const string JobValue = "job"; + + /// The Pool exists for the lifetime of the Job Schedule. The Batch Service creates the Pool when it creates the first Job on the schedule. You may apply this option only to Job Schedules, not to Jobs. + public static BatchPoolLifetimeOption JobSchedule { get; } = new BatchPoolLifetimeOption(JobScheduleValue); + /// The Pool exists for the lifetime of the Job to which it is dedicated. The Batch service creates the Pool when it creates the Job. 
If the 'job' option is applied to a Job Schedule, the Batch service creates a new auto Pool for every Job created on the schedule. + public static BatchPoolLifetimeOption Job { get; } = new BatchPoolLifetimeOption(JobValue); + /// Determines if two values are the same. + public static bool operator ==(BatchPoolLifetimeOption left, BatchPoolLifetimeOption right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(BatchPoolLifetimeOption left, BatchPoolLifetimeOption right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator BatchPoolLifetimeOption(string value) => new BatchPoolLifetimeOption(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is BatchPoolLifetimeOption other && Equals(other); + /// + public bool Equals(BatchPoolLifetimeOption other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolNodeCounts.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolNodeCounts.Serialization.cs new file mode 100644 index 0000000000000..cb3ec6b4e8072 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolNodeCounts.Serialization.cs @@ -0,0 +1,165 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchPoolNodeCounts : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchPoolNodeCounts)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("poolId"u8); + writer.WriteStringValue(PoolId); + if (Optional.IsDefined(Dedicated)) + { + writer.WritePropertyName("dedicated"u8); + writer.WriteObjectValue(Dedicated, options); + } + if (Optional.IsDefined(LowPriority)) + { + writer.WritePropertyName("lowPriority"u8); + writer.WriteObjectValue(LowPriority, options); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchPoolNodeCounts IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchPoolNodeCounts)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchPoolNodeCounts(document.RootElement, options); + } + + internal static BatchPoolNodeCounts DeserializeBatchPoolNodeCounts(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string poolId = default; + BatchNodeCounts dedicated = default; + BatchNodeCounts lowPriority = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("poolId"u8)) + { + poolId = property.Value.GetString(); + continue; + } + if (property.NameEquals("dedicated"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + dedicated = BatchNodeCounts.DeserializeBatchNodeCounts(property.Value, options); + continue; + } + if (property.NameEquals("lowPriority"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + lowPriority = BatchNodeCounts.DeserializeBatchNodeCounts(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchPoolNodeCounts(poolId, dedicated, lowPriority, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchPoolNodeCounts)} does not support writing '{options.Format}' format."); + } + } + + BatchPoolNodeCounts IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchPoolNodeCounts(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchPoolNodeCounts)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchPoolNodeCounts FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchPoolNodeCounts(document.RootElement); + } + + /// Convert into a . 
+ internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolNodeCounts.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolNodeCounts.cs new file mode 100644 index 0000000000000..ffcf85b12f646 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolNodeCounts.cs @@ -0,0 +1,83 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// The number of Compute Nodes in each state for a Pool. + public partial class BatchPoolNodeCounts + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The ID of the Pool. + /// is null. + internal BatchPoolNodeCounts(string poolId) + { + Argument.AssertNotNull(poolId, nameof(poolId)); + + PoolId = poolId; + } + + /// Initializes a new instance of . + /// The ID of the Pool. + /// The number of dedicated Compute Nodes in each state. + /// The number of Spot/Low-priority Compute Nodes in each state. + /// Keeps track of any properties unknown to the library. + internal BatchPoolNodeCounts(string poolId, BatchNodeCounts dedicated, BatchNodeCounts lowPriority, IDictionary serializedAdditionalRawData) + { + PoolId = poolId; + Dedicated = dedicated; + LowPriority = lowPriority; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchPoolNodeCounts() + { + } + + /// The ID of the Pool. + public string PoolId { get; } + /// The number of dedicated Compute Nodes in each state. + public BatchNodeCounts Dedicated { get; } + /// The number of Spot/Low-priority Compute Nodes in each state. + public BatchNodeCounts LowPriority { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceContent.Serialization.cs new file mode 100644 index 0000000000000..c0746e2dcb459 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceContent.Serialization.cs @@ -0,0 +1,60 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchPoolReplaceContent : IUtf8JsonSerializable, IJsonModel + { + BatchPoolReplaceContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchPoolReplaceContent)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchPoolReplaceContent(document.RootElement, options); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchPoolReplaceContent)} does not support writing '{options.Format}' format."); + } + } + + BatchPoolReplaceContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchPoolReplaceContent(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchPoolReplaceContent)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceContent.cs new file mode 100644 index 0000000000000..15a93a07a92ab --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceContent.cs @@ -0,0 +1,91 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; +using System.Linq; + +namespace Azure.Compute.Batch +{ + /// Parameters for replacing properties on an Azure Batch Pool. + public partial class BatchPoolReplaceContent + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The list of Application Packages to be installed on each Compute Node in the Pool. The list replaces any existing Application Package references on the Pool. Changes to Application Package references affect all new Compute Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Application Package references on any given Pool. If omitted, or if you specify an empty collection, any existing Application Packages references are removed from the Pool. A maximum of 10 references may be specified on a given Pool. 
+ /// A list of name-value pairs associated with the Pool as metadata. This list replaces any existing metadata configured on the Pool. If omitted, or if you specify an empty collection, any existing metadata is removed from the Pool. + /// or is null. + public BatchPoolReplaceContent(IEnumerable applicationPackageReferences, IEnumerable metadata) + { + Argument.AssertNotNull(applicationPackageReferences, nameof(applicationPackageReferences)); + Argument.AssertNotNull(metadata, nameof(metadata)); + + ApplicationPackageReferences = applicationPackageReferences.ToList(); + Metadata = metadata.ToList(); + } + + /// Initializes a new instance of . + /// A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. If this element is present, it overwrites any existing StartTask. If omitted, any existing StartTask is removed from the Pool. + /// The list of Application Packages to be installed on each Compute Node in the Pool. The list replaces any existing Application Package references on the Pool. Changes to Application Package references affect all new Compute Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Application Package references on any given Pool. If omitted, or if you specify an empty collection, any existing Application Packages references are removed from the Pool. A maximum of 10 references may be specified on a given Pool. + /// A list of name-value pairs associated with the Pool as metadata. This list replaces any existing metadata configured on the Pool. If omitted, or if you specify an empty collection, any existing metadata is removed from the Pool. + /// The desired node communication mode for the pool. This setting replaces any existing targetNodeCommunication setting on the Pool. If omitted, the existing setting is default. + /// Keeps track of any properties unknown to the library. + internal BatchPoolReplaceContent(BatchStartTask startTask, IList applicationPackageReferences, IList metadata, BatchNodeCommunicationMode? targetNodeCommunicationMode, IDictionary serializedAdditionalRawData) + { + StartTask = startTask; + ApplicationPackageReferences = applicationPackageReferences; + Metadata = metadata; + TargetNodeCommunicationMode = targetNodeCommunicationMode; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchPoolReplaceContent() + { + } + + /// A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. If this element is present, it overwrites any existing StartTask. If omitted, any existing StartTask is removed from the Pool. + public BatchStartTask StartTask { get; set; } + /// The list of Application Packages to be installed on each Compute Node in the Pool. The list replaces any existing Application Package references on the Pool. Changes to Application Package references affect all new Compute Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Application Package references on any given Pool. If omitted, or if you specify an empty collection, any existing Application Packages references are removed from the Pool. A maximum of 10 references may be specified on a given Pool. 
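The replace-all semantics documented above (an omitted or empty collection clears existing values, with at most 10 Application Package references per pool) can be exercised directly from the model's public constructor. A minimal sketch follows; the `MetadataItem(name, value)` constructor shape and the commented-out client call are assumptions for illustration, not confirmed API surface.

```C#
using System.Linq;
using Azure.Compute.Batch;

// Clear all Application Package references (an empty collection is valid and
// removes existing ones) while replacing the pool metadata with a single pair.
var replaceContent = new BatchPoolReplaceContent(
    applicationPackageReferences: Enumerable.Empty<BatchApplicationPackageReference>(),
    metadata: new[] { new MetadataItem("team", "render-farm") }); // ctor shape assumed

// Hypothetical client call -- check BatchClient for the exact method name:
// batchClient.ReplacePoolProperties("myPool", replaceContent);
```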
+ public IList ApplicationPackageReferences { get; } + /// A list of name-value pairs associated with the Pool as metadata. This list replaces any existing metadata configured on the Pool. If omitted, or if you specify an empty collection, any existing metadata is removed from the Pool. + public IList Metadata { get; } + /// The desired node communication mode for the pool. This setting replaces any existing targetNodeCommunication setting on the Pool. If omitted, the existing setting is default. + public BatchNodeCommunicationMode? TargetNodeCommunicationMode { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResizeContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResizeContent.Serialization.cs new file mode 100644 index 0000000000000..2a68d6fcb75a0 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResizeContent.Serialization.cs @@ -0,0 +1,187 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchPoolResizeContent : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchPoolResizeContent)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(TargetDedicatedNodes)) + { + writer.WritePropertyName("targetDedicatedNodes"u8); + writer.WriteNumberValue(TargetDedicatedNodes.Value); + } + if (Optional.IsDefined(TargetLowPriorityNodes)) + { + writer.WritePropertyName("targetLowPriorityNodes"u8); + writer.WriteNumberValue(TargetLowPriorityNodes.Value); + } + if (Optional.IsDefined(ResizeTimeout)) + { + writer.WritePropertyName("resizeTimeout"u8); + writer.WriteStringValue(ResizeTimeout.Value, "P"); + } + if (Optional.IsDefined(NodeDeallocationOption)) + { + writer.WritePropertyName("nodeDeallocationOption"u8); + writer.WriteStringValue(NodeDeallocationOption.Value.ToString()); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchPoolResizeContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchPoolResizeContent)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchPoolResizeContent(document.RootElement, options); + } + + internal static BatchPoolResizeContent DeserializeBatchPoolResizeContent(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + int? targetDedicatedNodes = default; + int? targetLowPriorityNodes = default; + TimeSpan? resizeTimeout = default; + BatchNodeDeallocationOption? nodeDeallocationOption = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("targetDedicatedNodes"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + targetDedicatedNodes = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("targetLowPriorityNodes"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + targetLowPriorityNodes = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("resizeTimeout"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + resizeTimeout = property.Value.GetTimeSpan("P"); + continue; + } + if (property.NameEquals("nodeDeallocationOption"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + nodeDeallocationOption = new BatchNodeDeallocationOption(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchPoolResizeContent(targetDedicatedNodes, targetLowPriorityNodes, resizeTimeout, nodeDeallocationOption, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchPoolResizeContent)} does not support writing '{options.Format}' format."); + } + } + + BatchPoolResizeContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchPoolResizeContent(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchPoolResizeContent)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchPoolResizeContent FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchPoolResizeContent(document.RootElement); + } + + /// Convert into a . 
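Because `BatchPoolResizeContent` implements the `IJsonModel`/`IPersistableModel` pattern shown above, it can be round-tripped through `System.ClientModel.Primitives.ModelReaderWriter`. A small sketch, assuming the default reader/writer options (the "J" JSON format) and that `BatchNodeDeallocationOption` exposes a `Requeue` value, as its documented default suggests:

```C#
using System;
using System.ClientModel.Primitives;
using Azure.Compute.Batch;

// Build a resize request; all properties are optional on the wire.
var resize = new BatchPoolResizeContent
{
    TargetDedicatedNodes = 4,
    TargetLowPriorityNodes = 0,
    ResizeTimeout = TimeSpan.FromMinutes(10),          // serialized as an ISO 8601 duration ("P" format)
    NodeDeallocationOption = BatchNodeDeallocationOption.Requeue, // assumed static value
};

BinaryData json = ModelReaderWriter.Write(resize);     // defaults to the "J" format
var parsed = ModelReaderWriter.Read<BatchPoolResizeContent>(json);
```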
+ internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResizeContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResizeContent.cs new file mode 100644 index 0000000000000..2a85fe641ba47 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResizeContent.cs @@ -0,0 +1,77 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Parameters for changing the size of an Azure Batch Pool. + public partial class BatchPoolResizeContent + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public BatchPoolResizeContent() + { + } + + /// Initializes a new instance of . + /// The desired number of dedicated Compute Nodes in the Pool. + /// The desired number of Spot/Low-priority Compute Nodes in the Pool. + /// The timeout for allocation of Nodes to the Pool or removal of Compute Nodes from the Pool. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + /// Determines what to do with a Compute Node and its running task(s) if the Pool size is decreasing. The default value is requeue. + /// Keeps track of any properties unknown to the library. + internal BatchPoolResizeContent(int? targetDedicatedNodes, int? targetLowPriorityNodes, TimeSpan? resizeTimeout, BatchNodeDeallocationOption? nodeDeallocationOption, IDictionary serializedAdditionalRawData) + { + TargetDedicatedNodes = targetDedicatedNodes; + TargetLowPriorityNodes = targetLowPriorityNodes; + ResizeTimeout = resizeTimeout; + NodeDeallocationOption = nodeDeallocationOption; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The desired number of dedicated Compute Nodes in the Pool. + public int? TargetDedicatedNodes { get; set; } + /// The desired number of Spot/Low-priority Compute Nodes in the Pool. + public int? TargetLowPriorityNodes { get; set; } + /// The timeout for allocation of Nodes to the Pool or removal of Compute Nodes from the Pool. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + public TimeSpan? ResizeTimeout { get; set; } + /// Determines what to do with a Compute Node and its running task(s) if the Pool size is decreasing. 
The default value is requeue. + public BatchNodeDeallocationOption? NodeDeallocationOption { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResourceStatistics.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResourceStatistics.Serialization.cs new file mode 100644 index 0000000000000..061b3ac2b785e --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResourceStatistics.Serialization.cs @@ -0,0 +1,245 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchPoolResourceStatistics : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchPoolResourceStatistics)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("startTime"u8); + writer.WriteStringValue(StartTime, "O"); + writer.WritePropertyName("lastUpdateTime"u8); + writer.WriteStringValue(LastUpdateTime, "O"); + writer.WritePropertyName("avgCPUPercentage"u8); + writer.WriteNumberValue(AvgCpuPercentage); + writer.WritePropertyName("avgMemoryGiB"u8); + writer.WriteNumberValue(AvgMemoryGiB); + writer.WritePropertyName("peakMemoryGiB"u8); + writer.WriteNumberValue(PeakMemoryGiB); + writer.WritePropertyName("avgDiskGiB"u8); + writer.WriteNumberValue(AvgDiskGiB); + writer.WritePropertyName("peakDiskGiB"u8); + writer.WriteNumberValue(PeakDiskGiB); + writer.WritePropertyName("diskReadIOps"u8); + writer.WriteNumberValue(DiskReadIOps); + writer.WritePropertyName("diskWriteIOps"u8); + writer.WriteNumberValue(DiskWriteIOps); + writer.WritePropertyName("diskReadGiB"u8); + writer.WriteNumberValue(DiskReadGiB); + writer.WritePropertyName("diskWriteGiB"u8); + writer.WriteNumberValue(DiskWriteGiB); + writer.WritePropertyName("networkReadGiB"u8); + writer.WriteNumberValue(NetworkReadGiB); + writer.WritePropertyName("networkWriteGiB"u8); + writer.WriteNumberValue(NetworkWriteGiB); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchPoolResourceStatistics IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchPoolResourceStatistics)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchPoolResourceStatistics(document.RootElement, options); + } + + internal static BatchPoolResourceStatistics DeserializeBatchPoolResourceStatistics(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + DateTimeOffset startTime = default; + DateTimeOffset lastUpdateTime = default; + float avgCPUPercentage = default; + float avgMemoryGiB = default; + float peakMemoryGiB = default; + float avgDiskGiB = default; + float peakDiskGiB = default; + long diskReadIOps = default; + long diskWriteIOps = default; + float diskReadGiB = default; + float diskWriteGiB = default; + float networkReadGiB = default; + float networkWriteGiB = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("startTime"u8)) + { + startTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("lastUpdateTime"u8)) + { + lastUpdateTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("avgCPUPercentage"u8)) + { + avgCPUPercentage = property.Value.GetSingle(); + continue; + } + if (property.NameEquals("avgMemoryGiB"u8)) + { + avgMemoryGiB = property.Value.GetSingle(); + continue; + } + if (property.NameEquals("peakMemoryGiB"u8)) + { + peakMemoryGiB = property.Value.GetSingle(); + continue; + } + if (property.NameEquals("avgDiskGiB"u8)) + { + avgDiskGiB = property.Value.GetSingle(); + continue; + } + if (property.NameEquals("peakDiskGiB"u8)) + { + peakDiskGiB = property.Value.GetSingle(); + continue; + } + if (property.NameEquals("diskReadIOps"u8)) + { + diskReadIOps = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("diskWriteIOps"u8)) + { + diskWriteIOps = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("diskReadGiB"u8)) + { + diskReadGiB = property.Value.GetSingle(); + continue; + } + if (property.NameEquals("diskWriteGiB"u8)) + { + diskWriteGiB = property.Value.GetSingle(); + continue; + } + if (property.NameEquals("networkReadGiB"u8)) + { + networkReadGiB = property.Value.GetSingle(); + continue; + } + if (property.NameEquals("networkWriteGiB"u8)) + { + networkWriteGiB = property.Value.GetSingle(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchPoolResourceStatistics( + startTime, + lastUpdateTime, + avgCPUPercentage, + avgMemoryGiB, + peakMemoryGiB, + avgDiskGiB, + peakDiskGiB, + diskReadIOps, + diskWriteIOps, + diskReadGiB, + diskWriteGiB, + networkReadGiB, + networkWriteGiB, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchPoolResourceStatistics)} does not support writing '{options.Format}' format."); + } + } + + BatchPoolResourceStatistics IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchPoolResourceStatistics(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchPoolResourceStatistics)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchPoolResourceStatistics FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchPoolResourceStatistics(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResourceStatistics.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResourceStatistics.cs new file mode 100644 index 0000000000000..0fd446ceda9ea --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResourceStatistics.cs @@ -0,0 +1,144 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Statistics related to resource consumption by Compute Nodes in a Pool. + public partial class BatchPoolResourceStatistics + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The start time of the time range covered by the statistics. + /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. + /// The average CPU usage across all Compute Nodes in the Pool (percentage per node). + /// The average memory usage in GiB across all Compute Nodes in the Pool. + /// The peak memory usage in GiB across all Compute Nodes in the Pool. + /// The average used disk space in GiB across all Compute Nodes in the Pool. 
+ /// The peak used disk space in GiB across all Compute Nodes in the Pool. + /// The total number of disk read operations across all Compute Nodes in the Pool. + /// The total number of disk write operations across all Compute Nodes in the Pool. + /// The total amount of data in GiB of disk reads across all Compute Nodes in the Pool. + /// The total amount of data in GiB of disk writes across all Compute Nodes in the Pool. + /// The total amount of data in GiB of network reads across all Compute Nodes in the Pool. + /// The total amount of data in GiB of network writes across all Compute Nodes in the Pool. + internal BatchPoolResourceStatistics(DateTimeOffset startTime, DateTimeOffset lastUpdateTime, float avgCpuPercentage, float avgMemoryGiB, float peakMemoryGiB, float avgDiskGiB, float peakDiskGiB, long diskReadIOps, long diskWriteIOps, float diskReadGiB, float diskWriteGiB, float networkReadGiB, float networkWriteGiB) + { + StartTime = startTime; + LastUpdateTime = lastUpdateTime; + AvgCpuPercentage = avgCpuPercentage; + AvgMemoryGiB = avgMemoryGiB; + PeakMemoryGiB = peakMemoryGiB; + AvgDiskGiB = avgDiskGiB; + PeakDiskGiB = peakDiskGiB; + DiskReadIOps = diskReadIOps; + DiskWriteIOps = diskWriteIOps; + DiskReadGiB = diskReadGiB; + DiskWriteGiB = diskWriteGiB; + NetworkReadGiB = networkReadGiB; + NetworkWriteGiB = networkWriteGiB; + } + + /// Initializes a new instance of . + /// The start time of the time range covered by the statistics. + /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. + /// The average CPU usage across all Compute Nodes in the Pool (percentage per node). + /// The average memory usage in GiB across all Compute Nodes in the Pool. + /// The peak memory usage in GiB across all Compute Nodes in the Pool. + /// The average used disk space in GiB across all Compute Nodes in the Pool. + /// The peak used disk space in GiB across all Compute Nodes in the Pool. + /// The total number of disk read operations across all Compute Nodes in the Pool. + /// The total number of disk write operations across all Compute Nodes in the Pool. + /// The total amount of data in GiB of disk reads across all Compute Nodes in the Pool. + /// The total amount of data in GiB of disk writes across all Compute Nodes in the Pool. + /// The total amount of data in GiB of network reads across all Compute Nodes in the Pool. + /// The total amount of data in GiB of network writes across all Compute Nodes in the Pool. + /// Keeps track of any properties unknown to the library. + internal BatchPoolResourceStatistics(DateTimeOffset startTime, DateTimeOffset lastUpdateTime, float avgCpuPercentage, float avgMemoryGiB, float peakMemoryGiB, float avgDiskGiB, float peakDiskGiB, long diskReadIOps, long diskWriteIOps, float diskReadGiB, float diskWriteGiB, float networkReadGiB, float networkWriteGiB, IDictionary serializedAdditionalRawData) + { + StartTime = startTime; + LastUpdateTime = lastUpdateTime; + AvgCpuPercentage = avgCpuPercentage; + AvgMemoryGiB = avgMemoryGiB; + PeakMemoryGiB = peakMemoryGiB; + AvgDiskGiB = avgDiskGiB; + PeakDiskGiB = peakDiskGiB; + DiskReadIOps = diskReadIOps; + DiskWriteIOps = diskWriteIOps; + DiskReadGiB = diskReadGiB; + DiskWriteGiB = diskWriteGiB; + NetworkReadGiB = networkReadGiB; + NetworkWriteGiB = networkWriteGiB; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. 
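`BatchPoolResourceStatistics` is a service-returned, read-only model (internal constructors, get-only properties), so client code never builds one directly. As a rough sketch of how one materializes from wire JSON, using the property names from the serializer above (the payload values are invented):

```C#
using System;
using System.ClientModel.Primitives;
using Azure.Compute.Batch;

// Illustrative wire payload matching the deserializer's property names.
BinaryData wire = BinaryData.FromString("""
{
  "startTime": "2024-06-01T00:00:00.0000000Z",
  "lastUpdateTime": "2024-06-01T01:00:00.0000000Z",
  "avgCPUPercentage": 42.5,
  "avgMemoryGiB": 3.2,   "peakMemoryGiB": 5.9,
  "avgDiskGiB": 10.0,    "peakDiskGiB": 12.5,
  "diskReadIOps": 1000,  "diskWriteIOps": 500,
  "diskReadGiB": 1.5,    "diskWriteGiB": 0.8,
  "networkReadGiB": 2.0, "networkWriteGiB": 1.1
}
""");

var stats = ModelReaderWriter.Read<BatchPoolResourceStatistics>(wire);
Console.WriteLine($"Peak memory: {stats.PeakMemoryGiB} GiB between {stats.StartTime} and {stats.LastUpdateTime}");
```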
+ internal BatchPoolResourceStatistics() + { + } + + /// The start time of the time range covered by the statistics. + public DateTimeOffset StartTime { get; } + /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. + public DateTimeOffset LastUpdateTime { get; } + /// The average CPU usage across all Compute Nodes in the Pool (percentage per node). + public float AvgCpuPercentage { get; } + /// The average memory usage in GiB across all Compute Nodes in the Pool. + public float AvgMemoryGiB { get; } + /// The peak memory usage in GiB across all Compute Nodes in the Pool. + public float PeakMemoryGiB { get; } + /// The average used disk space in GiB across all Compute Nodes in the Pool. + public float AvgDiskGiB { get; } + /// The peak used disk space in GiB across all Compute Nodes in the Pool. + public float PeakDiskGiB { get; } + /// The total number of disk read operations across all Compute Nodes in the Pool. + public long DiskReadIOps { get; } + /// The total number of disk write operations across all Compute Nodes in the Pool. + public long DiskWriteIOps { get; } + /// The total amount of data in GiB of disk reads across all Compute Nodes in the Pool. + public float DiskReadGiB { get; } + /// The total amount of data in GiB of disk writes across all Compute Nodes in the Pool. + public float DiskWriteGiB { get; } + /// The total amount of data in GiB of network reads across all Compute Nodes in the Pool. + public float NetworkReadGiB { get; } + /// The total amount of data in GiB of network writes across all Compute Nodes in the Pool. + public float NetworkWriteGiB { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolSpecification.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolSpecification.Serialization.cs new file mode 100644 index 0000000000000..293379cd7d853 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolSpecification.Serialization.cs @@ -0,0 +1,485 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchPoolSpecification : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchPoolSpecification)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(DisplayName)) + { + writer.WritePropertyName("displayName"u8); + writer.WriteStringValue(DisplayName); + } + writer.WritePropertyName("vmSize"u8); + writer.WriteStringValue(VmSize); + if (Optional.IsDefined(VirtualMachineConfiguration)) + { + writer.WritePropertyName("virtualMachineConfiguration"u8); + writer.WriteObjectValue(VirtualMachineConfiguration, options); + } + if (Optional.IsDefined(TaskSlotsPerNode)) + { + writer.WritePropertyName("taskSlotsPerNode"u8); + writer.WriteNumberValue(TaskSlotsPerNode.Value); + } + if (Optional.IsDefined(TaskSchedulingPolicy)) + { + writer.WritePropertyName("taskSchedulingPolicy"u8); + writer.WriteObjectValue(TaskSchedulingPolicy, options); + } + if (Optional.IsDefined(ResizeTimeout)) + { + writer.WritePropertyName("resizeTimeout"u8); + writer.WriteStringValue(ResizeTimeout.Value, "P"); + } + if (Optional.IsDefined(ResourceTags)) + { + writer.WritePropertyName("resourceTags"u8); + writer.WriteStringValue(ResourceTags); + } + if (Optional.IsDefined(TargetDedicatedNodes)) + { + writer.WritePropertyName("targetDedicatedNodes"u8); + writer.WriteNumberValue(TargetDedicatedNodes.Value); + } + if (Optional.IsDefined(TargetLowPriorityNodes)) + { + writer.WritePropertyName("targetLowPriorityNodes"u8); + writer.WriteNumberValue(TargetLowPriorityNodes.Value); + } + if (Optional.IsDefined(EnableAutoScale)) + { + writer.WritePropertyName("enableAutoScale"u8); + writer.WriteBooleanValue(EnableAutoScale.Value); + } + if (Optional.IsDefined(AutoScaleFormula)) + { + writer.WritePropertyName("autoScaleFormula"u8); + writer.WriteStringValue(AutoScaleFormula); + } + if (Optional.IsDefined(AutoScaleEvaluationInterval)) + { + writer.WritePropertyName("autoScaleEvaluationInterval"u8); + writer.WriteStringValue(AutoScaleEvaluationInterval.Value, "P"); + } + if (Optional.IsDefined(EnableInterNodeCommunication)) + { + writer.WritePropertyName("enableInterNodeCommunication"u8); + writer.WriteBooleanValue(EnableInterNodeCommunication.Value); + } + if (Optional.IsDefined(NetworkConfiguration)) + { + writer.WritePropertyName("networkConfiguration"u8); + writer.WriteObjectValue(NetworkConfiguration, options); + } + if (Optional.IsDefined(StartTask)) + { + writer.WritePropertyName("startTask"u8); + writer.WriteObjectValue(StartTask, options); + } + if (Optional.IsCollectionDefined(ApplicationPackageReferences)) + { + writer.WritePropertyName("applicationPackageReferences"u8); + writer.WriteStartArray(); + foreach (var item in ApplicationPackageReferences) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (Optional.IsCollectionDefined(UserAccounts)) + { + writer.WritePropertyName("userAccounts"u8); + writer.WriteStartArray(); + foreach (var item in UserAccounts) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (Optional.IsCollectionDefined(Metadata)) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartArray(); + foreach (var item in Metadata) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (Optional.IsCollectionDefined(MountConfiguration)) + { + writer.WritePropertyName("mountConfiguration"u8); + writer.WriteStartArray(); + foreach (var item in MountConfiguration) + { + 
writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (Optional.IsDefined(TargetNodeCommunicationMode)) + { + writer.WritePropertyName("targetNodeCommunicationMode"u8); + writer.WriteStringValue(TargetNodeCommunicationMode.Value.ToString()); + } + if (Optional.IsDefined(UpgradePolicy)) + { + writer.WritePropertyName("upgradePolicy"u8); + writer.WriteObjectValue(UpgradePolicy, options); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchPoolSpecification IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchPoolSpecification)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchPoolSpecification(document.RootElement, options); + } + + internal static BatchPoolSpecification DeserializeBatchPoolSpecification(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string displayName = default; + string vmSize = default; + VirtualMachineConfiguration virtualMachineConfiguration = default; + int? taskSlotsPerNode = default; + BatchTaskSchedulingPolicy taskSchedulingPolicy = default; + TimeSpan? resizeTimeout = default; + string resourceTags = default; + int? targetDedicatedNodes = default; + int? targetLowPriorityNodes = default; + bool? enableAutoScale = default; + string autoScaleFormula = default; + TimeSpan? autoScaleEvaluationInterval = default; + bool? enableInterNodeCommunication = default; + NetworkConfiguration networkConfiguration = default; + BatchStartTask startTask = default; + IList applicationPackageReferences = default; + IList userAccounts = default; + IList metadata = default; + IList mountConfiguration = default; + BatchNodeCommunicationMode? 
targetNodeCommunicationMode = default; + UpgradePolicy upgradePolicy = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("displayName"u8)) + { + displayName = property.Value.GetString(); + continue; + } + if (property.NameEquals("vmSize"u8)) + { + vmSize = property.Value.GetString(); + continue; + } + if (property.NameEquals("virtualMachineConfiguration"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + virtualMachineConfiguration = VirtualMachineConfiguration.DeserializeVirtualMachineConfiguration(property.Value, options); + continue; + } + if (property.NameEquals("taskSlotsPerNode"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + taskSlotsPerNode = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("taskSchedulingPolicy"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + taskSchedulingPolicy = BatchTaskSchedulingPolicy.DeserializeBatchTaskSchedulingPolicy(property.Value, options); + continue; + } + if (property.NameEquals("resizeTimeout"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + resizeTimeout = property.Value.GetTimeSpan("P"); + continue; + } + if (property.NameEquals("resourceTags"u8)) + { + resourceTags = property.Value.GetString(); + continue; + } + if (property.NameEquals("targetDedicatedNodes"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + targetDedicatedNodes = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("targetLowPriorityNodes"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + targetLowPriorityNodes = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("enableAutoScale"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + enableAutoScale = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("autoScaleFormula"u8)) + { + autoScaleFormula = property.Value.GetString(); + continue; + } + if (property.NameEquals("autoScaleEvaluationInterval"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + autoScaleEvaluationInterval = property.Value.GetTimeSpan("P"); + continue; + } + if (property.NameEquals("enableInterNodeCommunication"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + enableInterNodeCommunication = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("networkConfiguration"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + networkConfiguration = NetworkConfiguration.DeserializeNetworkConfiguration(property.Value, options); + continue; + } + if (property.NameEquals("startTask"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + startTask = BatchStartTask.DeserializeBatchStartTask(property.Value, options); + continue; + } + if (property.NameEquals("applicationPackageReferences"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(BatchApplicationPackageReference.DeserializeBatchApplicationPackageReference(item, options)); + } + applicationPackageReferences = array; + continue; + } + if (property.NameEquals("userAccounts"u8)) + { + if 
(property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(UserAccount.DeserializeUserAccount(item, options)); + } + userAccounts = array; + continue; + } + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(MetadataItem.DeserializeMetadataItem(item, options)); + } + metadata = array; + continue; + } + if (property.NameEquals("mountConfiguration"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(Batch.MountConfiguration.DeserializeMountConfiguration(item, options)); + } + mountConfiguration = array; + continue; + } + if (property.NameEquals("targetNodeCommunicationMode"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + targetNodeCommunicationMode = new BatchNodeCommunicationMode(property.Value.GetString()); + continue; + } + if (property.NameEquals("upgradePolicy"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + upgradePolicy = UpgradePolicy.DeserializeUpgradePolicy(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchPoolSpecification( + displayName, + vmSize, + virtualMachineConfiguration, + taskSlotsPerNode, + taskSchedulingPolicy, + resizeTimeout, + resourceTags, + targetDedicatedNodes, + targetLowPriorityNodes, + enableAutoScale, + autoScaleFormula, + autoScaleEvaluationInterval, + enableInterNodeCommunication, + networkConfiguration, + startTask, + applicationPackageReferences ?? new ChangeTrackingList(), + userAccounts ?? new ChangeTrackingList(), + metadata ?? new ChangeTrackingList(), + mountConfiguration ?? new ChangeTrackingList(), + targetNodeCommunicationMode, + upgradePolicy, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchPoolSpecification)} does not support writing '{options.Format}' format."); + } + } + + BatchPoolSpecification IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchPoolSpecification(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchPoolSpecification)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. 
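The constraints called out in the `BatchPoolSpecification` docs below (a required `vmSize`; with `enableAutoScale` false at least one target node count must be set, and with it true an `autoScaleFormula` is required) translate directly into how the model is constructed. A sketch with illustrative values:

```C#
using System;
using Azure.Compute.Batch;

// Fixed-size pool: EnableAutoScale is false, so a target node count is required.
var spec = new BatchPoolSpecification("STANDARD_D2s_v3") // VM size is illustrative
{
    DisplayName = "nightly-render",
    TaskSlotsPerNode = 2,
    TargetDedicatedNodes = 3,
    TargetLowPriorityNodes = 0,
    EnableAutoScale = false,
    ResizeTimeout = TimeSpan.FromMinutes(15),            // the documented default
};
```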
+ internal static BatchPoolSpecification FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchPoolSpecification(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolSpecification.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolSpecification.cs new file mode 100644 index 0000000000000..758fe6016c02a --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolSpecification.cs @@ -0,0 +1,159 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Specification for creating a new Pool. + public partial class BatchPoolSpecification + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The size of the virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + /// is null. + public BatchPoolSpecification(string vmSize) + { + Argument.AssertNotNull(vmSize, nameof(vmSize)); + + VmSize = vmSize; + ApplicationPackageReferences = new ChangeTrackingList(); + UserAccounts = new ChangeTrackingList(); + Metadata = new ChangeTrackingList(); + MountConfiguration = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. + /// The size of the virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + /// The virtual machine configuration for the Pool. This property must be specified if the Pool needs to be created with Azure IaaS VMs. If it is not specified then the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + /// The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. + /// How Tasks are distributed across Compute Nodes in a Pool. 
If not specified, the default is spread. + /// The timeout for allocation of Compute Nodes to the Pool. This timeout applies only to manual scaling; it has no effect when enableAutoScale is set to true. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service rejects the request with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + /// The user-specified tags associated with the pool.The user-defined tags to be associated with the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources associated with the pool. This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'. + /// The desired number of dedicated Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. + /// The desired number of Spot/Low-priority Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. + /// Whether the Pool size should automatically adjust over time. If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula element is required. The Pool automatically resizes according to the formula. The default value is false. + /// The formula for the desired number of Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to false. It is required if enableAutoScale is set to true. The formula is checked for validity before the Pool is created. If the formula is not valid, the Batch service rejects the request with detailed error information. + /// The time interval at which to automatically adjust the Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service rejects the request with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + /// Whether the Pool permits direct communication between Compute Nodes. Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false. + /// The network configuration for the Pool. + /// A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. + /// The list of Packages to be installed on each Compute Node in the Pool. When creating a pool, the package's application ID must be fully qualified (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. + /// The list of user Accounts to be created on each Compute Node in the Pool. 
+ /// A list of name-value pairs associated with the Pool as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. + /// A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. + /// The desired node communication mode for the pool. If omitted, the default value is Default. + /// The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. + /// Keeps track of any properties unknown to the library. + internal BatchPoolSpecification(string displayName, string vmSize, VirtualMachineConfiguration virtualMachineConfiguration, int? taskSlotsPerNode, BatchTaskSchedulingPolicy taskSchedulingPolicy, TimeSpan? resizeTimeout, string resourceTags, int? targetDedicatedNodes, int? targetLowPriorityNodes, bool? enableAutoScale, string autoScaleFormula, TimeSpan? autoScaleEvaluationInterval, bool? enableInterNodeCommunication, NetworkConfiguration networkConfiguration, BatchStartTask startTask, IList applicationPackageReferences, IList userAccounts, IList metadata, IList mountConfiguration, BatchNodeCommunicationMode? targetNodeCommunicationMode, UpgradePolicy upgradePolicy, IDictionary serializedAdditionalRawData) + { + DisplayName = displayName; + VmSize = vmSize; + VirtualMachineConfiguration = virtualMachineConfiguration; + TaskSlotsPerNode = taskSlotsPerNode; + TaskSchedulingPolicy = taskSchedulingPolicy; + ResizeTimeout = resizeTimeout; + ResourceTags = resourceTags; + TargetDedicatedNodes = targetDedicatedNodes; + TargetLowPriorityNodes = targetLowPriorityNodes; + EnableAutoScale = enableAutoScale; + AutoScaleFormula = autoScaleFormula; + AutoScaleEvaluationInterval = autoScaleEvaluationInterval; + EnableInterNodeCommunication = enableInterNodeCommunication; + NetworkConfiguration = networkConfiguration; + StartTask = startTask; + ApplicationPackageReferences = applicationPackageReferences; + UserAccounts = userAccounts; + Metadata = metadata; + MountConfiguration = mountConfiguration; + TargetNodeCommunicationMode = targetNodeCommunicationMode; + UpgradePolicy = upgradePolicy; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchPoolSpecification() + { + } + + /// The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. + public string DisplayName { get; set; } + /// The size of the virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + public string VmSize { get; set; } + /// The virtual machine configuration for the Pool. This property must be specified if the Pool needs to be created with Azure IaaS VMs. If it is not specified then the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + public VirtualMachineConfiguration VirtualMachineConfiguration { get; set; } + /// The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. + public int? TaskSlotsPerNode { get; set; } + /// How Tasks are distributed across Compute Nodes in a Pool. 
If not specified, the default is spread. + public BatchTaskSchedulingPolicy TaskSchedulingPolicy { get; set; } + /// The timeout for allocation of Compute Nodes to the Pool. This timeout applies only to manual scaling; it has no effect when enableAutoScale is set to true. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service rejects the request with an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + public TimeSpan? ResizeTimeout { get; set; } + /// The user-specified tags associated with the pool.The user-defined tags to be associated with the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources associated with the pool. This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'. + public string ResourceTags { get; set; } + /// The desired number of dedicated Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. + public int? TargetDedicatedNodes { get; set; } + /// The desired number of Spot/Low-priority Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. + public int? TargetLowPriorityNodes { get; set; } + /// Whether the Pool size should automatically adjust over time. If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula element is required. The Pool automatically resizes according to the formula. The default value is false. + public bool? EnableAutoScale { get; set; } + /// The formula for the desired number of Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to false. It is required if enableAutoScale is set to true. The formula is checked for validity before the Pool is created. If the formula is not valid, the Batch service rejects the request with detailed error information. + public string AutoScaleFormula { get; set; } + /// The time interval at which to automatically adjust the Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service rejects the request with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + public TimeSpan? AutoScaleEvaluationInterval { get; set; } + /// Whether the Pool permits direct communication between Compute Nodes. Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false. + public bool? EnableInterNodeCommunication { get; set; } + /// The network configuration for the Pool. + public NetworkConfiguration NetworkConfiguration { get; set; } + /// A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. 
+        /// <summary> A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. </summary>
+        public BatchStartTask StartTask { get; set; }
+        /// <summary> The list of Packages to be installed on each Compute Node in the Pool. When creating a pool, the package's application ID must be fully qualified (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. </summary>
+        public IList<BatchApplicationPackageReference> ApplicationPackageReferences { get; }
+        /// <summary> The list of user Accounts to be created on each Compute Node in the Pool. </summary>
+        public IList<UserAccount> UserAccounts { get; }
+        /// <summary> A list of name-value pairs associated with the Pool as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. </summary>
+        public IList<MetadataItem> Metadata { get; }
+        /// <summary> A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. </summary>
+        public IList<MountConfiguration> MountConfiguration { get; }
+        /// <summary> The desired node communication mode for the pool. If omitted, the default value is Default. </summary>
+        public BatchNodeCommunicationMode? TargetNodeCommunicationMode { get; set; }
+        /// <summary> The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. </summary>
+        public UpgradePolicy UpgradePolicy { get; set; }
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolState.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolState.cs
new file mode 100644
index 0000000000000..c103098e68c8e
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolState.cs
@@ -0,0 +1,51 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.ComponentModel;
+
+namespace Azure.Compute.Batch
+{
+    /// <summary> BatchPoolState enums. </summary>
+    public readonly partial struct BatchPoolState : IEquatable<BatchPoolState>
+    {
+        private readonly string _value;
+
+        /// <summary> Initializes a new instance of <see cref="BatchPoolState"/>. </summary>
+        /// <exception cref="ArgumentNullException"> <paramref name="value"/> is null. </exception>
+        public BatchPoolState(string value)
+        {
+            _value = value ?? throw new ArgumentNullException(nameof(value));
+        }
+
+        private const string ActiveValue = "active";
+        private const string DeletingValue = "deleting";
+
+        /// <summary> The Pool is available to run Tasks subject to the availability of Compute Nodes. </summary>
+        public static BatchPoolState Active { get; } = new BatchPoolState(ActiveValue);
+        /// <summary> The user has requested that the Pool be deleted, but the delete operation has not yet completed. </summary>
+        public static BatchPoolState Deleting { get; } = new BatchPoolState(DeletingValue);
+        /// <summary> Determines if two <see cref="BatchPoolState"/> values are the same. </summary>
+        public static bool operator ==(BatchPoolState left, BatchPoolState right) => left.Equals(right);
+        /// <summary> Determines if two <see cref="BatchPoolState"/> values are not the same. </summary>
+        public static bool operator !=(BatchPoolState left, BatchPoolState right) => !left.Equals(right);
+        /// <summary> Converts a string to a <see cref="BatchPoolState"/>. </summary>
+        public static implicit operator BatchPoolState(string value) => new BatchPoolState(value);
+
+        /// <inheritdoc />
+        [EditorBrowsable(EditorBrowsableState.Never)]
+        public override bool Equals(object obj) => obj is BatchPoolState other && Equals(other);
+        /// <inheritdoc />
+        public bool Equals(BatchPoolState other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase);
+
+        /// <inheritdoc />
+        [EditorBrowsable(EditorBrowsableState.Never)]
+        public override int GetHashCode() => _value?.GetHashCode() ?? 0;
+        /// <inheritdoc />
+        public override string ToString() => _value;
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolStatistics.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolStatistics.Serialization.cs
new file mode 100644
index 0000000000000..8f3aaf2e74929
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolStatistics.Serialization.cs
@@ -0,0 +1,187 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.ClientModel.Primitives;
+using System.Collections.Generic;
+using System.Text.Json;
+using Azure.Core;
+
+namespace Azure.Compute.Batch
+{
+    public partial class BatchPoolStatistics : IUtf8JsonSerializable, IJsonModel<BatchPoolStatistics>
+    {
+        void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel<BatchPoolStatistics>)this).Write(writer, ModelSerializationExtensions.WireOptions);
+
+        void IJsonModel<BatchPoolStatistics>.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ? ((IPersistableModel<BatchPoolStatistics>)this).GetFormatFromOptions(options) : options.Format;
+            if (format != "J")
+            {
+                throw new FormatException($"The model {nameof(BatchPoolStatistics)} does not support writing '{format}' format.");
+            }
+
+            writer.WriteStartObject();
+            writer.WritePropertyName("url"u8);
+            writer.WriteStringValue(Url);
+            writer.WritePropertyName("startTime"u8);
+            writer.WriteStringValue(StartTime, "O");
+            writer.WritePropertyName("lastUpdateTime"u8);
+            writer.WriteStringValue(LastUpdateTime, "O");
+            if (Optional.IsDefined(UsageStats))
+            {
+                writer.WritePropertyName("usageStats"u8);
+                writer.WriteObjectValue(UsageStats, options);
+            }
+            if (Optional.IsDefined(ResourceStats))
+            {
+                writer.WritePropertyName("resourceStats"u8);
+                writer.WriteObjectValue(ResourceStats, options);
+            }
+            if (options.Format != "W" && _serializedAdditionalRawData != null)
+            {
+                foreach (var item in _serializedAdditionalRawData)
+                {
+                    writer.WritePropertyName(item.Key);
+#if NET6_0_OR_GREATER
+                    writer.WriteRawValue(item.Value);
+#else
+                    using (JsonDocument document = JsonDocument.Parse(item.Value))
+                    {
+                        JsonSerializer.Serialize(writer, document.RootElement);
+                    }
+#endif
+                }
+            }
+            writer.WriteEndObject();
+        }
+
+        BatchPoolStatistics IJsonModel<BatchPoolStatistics>.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ?
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchPoolStatistics)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchPoolStatistics(document.RootElement, options); + } + + internal static BatchPoolStatistics DeserializeBatchPoolStatistics(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string url = default; + DateTimeOffset startTime = default; + DateTimeOffset lastUpdateTime = default; + BatchPoolUsageStatistics usageStats = default; + BatchPoolResourceStatistics resourceStats = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("url"u8)) + { + url = property.Value.GetString(); + continue; + } + if (property.NameEquals("startTime"u8)) + { + startTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("lastUpdateTime"u8)) + { + lastUpdateTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("usageStats"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + usageStats = BatchPoolUsageStatistics.DeserializeBatchPoolUsageStatistics(property.Value, options); + continue; + } + if (property.NameEquals("resourceStats"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + resourceStats = BatchPoolResourceStatistics.DeserializeBatchPoolResourceStatistics(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchPoolStatistics( + url, + startTime, + lastUpdateTime, + usageStats, + resourceStats, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchPoolStatistics)} does not support writing '{options.Format}' format."); + } + } + + BatchPoolStatistics IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchPoolStatistics(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchPoolStatistics)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchPoolStatistics FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchPoolStatistics(document.RootElement); + } + + /// Convert into a . 
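Reviewer note (illustrative): because the model implements the `IPersistableModel` pattern shown above, callers can rehydrate it from raw JSON with System.ClientModel's `ModelReaderWriter`. The payload values below are invented; the property names match the ones the serializer writes:

```csharp
using System;
using System.ClientModel.Primitives;
using Azure.Compute.Batch;

// Invented payload using the wire property names written by the serializer above.
BinaryData json = BinaryData.FromString(
    "{\"url\":\"https://contoso.batch.azure.com/pools/pool1/stats\"," +
    "\"startTime\":\"2024-06-01T00:00:00Z\",\"lastUpdateTime\":\"2024-06-02T00:00:00Z\"}");

// ModelReaderWriter drives the IJsonModel/IPersistableModel implementation in this file.
BatchPoolStatistics stats = ModelReaderWriter.Read<BatchPoolStatistics>(json);
Console.WriteLine($"{stats.Url}: {stats.StartTime:O} -> {stats.LastUpdateTime:O}");
```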
+ internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolStatistics.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolStatistics.cs new file mode 100644 index 0000000000000..ca9bdfab38e36 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolStatistics.cs @@ -0,0 +1,95 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Contains utilization and resource usage statistics for the lifetime of a Pool. + public partial class BatchPoolStatistics + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The URL for the statistics. + /// The start time of the time range covered by the statistics. + /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. + /// is null. + internal BatchPoolStatistics(string url, DateTimeOffset startTime, DateTimeOffset lastUpdateTime) + { + Argument.AssertNotNull(url, nameof(url)); + + Url = url; + StartTime = startTime; + LastUpdateTime = lastUpdateTime; + } + + /// Initializes a new instance of . + /// The URL for the statistics. + /// The start time of the time range covered by the statistics. + /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. + /// Statistics related to Pool usage, such as the amount of core-time used. + /// Statistics related to resource consumption by Compute Nodes in the Pool. + /// Keeps track of any properties unknown to the library. + internal BatchPoolStatistics(string url, DateTimeOffset startTime, DateTimeOffset lastUpdateTime, BatchPoolUsageStatistics usageStats, BatchPoolResourceStatistics resourceStats, IDictionary serializedAdditionalRawData) + { + Url = url; + StartTime = startTime; + LastUpdateTime = lastUpdateTime; + UsageStats = usageStats; + ResourceStats = resourceStats; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchPoolStatistics() + { + } + + /// The URL for the statistics. + public string Url { get; } + /// The start time of the time range covered by the statistics. + public DateTimeOffset StartTime { get; } + /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. 
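Reviewer note (illustrative): per the property docs, every statistic is scoped to the startTime–lastUpdateTime window, so the window length is a useful derived value. A trivial sketch (the extension method is hypothetical, not part of the generated API):

```csharp
using System;
using Azure.Compute.Batch;

static class PoolStatisticsExtensions
{
    // The docs scope every statistic to [StartTime, LastUpdateTime].
    public static TimeSpan Window(this BatchPoolStatistics stats) =>
        stats.LastUpdateTime - stats.StartTime;
}
```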
+ public DateTimeOffset LastUpdateTime { get; } + /// Statistics related to Pool usage, such as the amount of core-time used. + public BatchPoolUsageStatistics UsageStats { get; } + /// Statistics related to resource consumption by Compute Nodes in the Pool. + public BatchPoolResourceStatistics ResourceStats { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUpdateContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUpdateContent.Serialization.cs new file mode 100644 index 0000000000000..1bcfc367c703b --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUpdateContent.Serialization.cs @@ -0,0 +1,207 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchPoolUpdateContent : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchPoolUpdateContent)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(StartTask)) + { + writer.WritePropertyName("startTask"u8); + writer.WriteObjectValue(StartTask, options); + } + if (Optional.IsCollectionDefined(ApplicationPackageReferences)) + { + writer.WritePropertyName("applicationPackageReferences"u8); + writer.WriteStartArray(); + foreach (var item in ApplicationPackageReferences) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (Optional.IsCollectionDefined(Metadata)) + { + writer.WritePropertyName("metadata"u8); + writer.WriteStartArray(); + foreach (var item in Metadata) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (Optional.IsDefined(TargetNodeCommunicationMode)) + { + writer.WritePropertyName("targetNodeCommunicationMode"u8); + writer.WriteStringValue(TargetNodeCommunicationMode.Value.ToString()); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchPoolUpdateContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchPoolUpdateContent)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchPoolUpdateContent(document.RootElement, options); + } + + internal static BatchPoolUpdateContent DeserializeBatchPoolUpdateContent(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + BatchStartTask startTask = default; + IList applicationPackageReferences = default; + IList metadata = default; + BatchNodeCommunicationMode? targetNodeCommunicationMode = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("startTask"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + startTask = BatchStartTask.DeserializeBatchStartTask(property.Value, options); + continue; + } + if (property.NameEquals("applicationPackageReferences"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(BatchApplicationPackageReference.DeserializeBatchApplicationPackageReference(item, options)); + } + applicationPackageReferences = array; + continue; + } + if (property.NameEquals("metadata"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(MetadataItem.DeserializeMetadataItem(item, options)); + } + metadata = array; + continue; + } + if (property.NameEquals("targetNodeCommunicationMode"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + targetNodeCommunicationMode = new BatchNodeCommunicationMode(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchPoolUpdateContent(startTask, applicationPackageReferences ?? new ChangeTrackingList(), metadata ?? new ChangeTrackingList(), targetNodeCommunicationMode, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchPoolUpdateContent)} does not support writing '{options.Format}' format."); + } + } + + BatchPoolUpdateContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchPoolUpdateContent(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchPoolUpdateContent)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchPoolUpdateContent FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchPoolUpdateContent(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUpdateContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUpdateContent.cs new file mode 100644 index 0000000000000..1300eb052c5c8 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUpdateContent.cs @@ -0,0 +1,79 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Parameters for updating an Azure Batch Pool. + public partial class BatchPoolUpdateContent + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public BatchPoolUpdateContent() + { + ApplicationPackageReferences = new ChangeTrackingList(); + Metadata = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. If this element is present, it overwrites any existing StartTask. If omitted, any existing StartTask is left unchanged. + /// A list of Packages to be installed on each Compute Node in the Pool. Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. If this element is present, it replaces any existing Package references. If you specify an empty collection, then all Package references are removed from the Pool. If omitted, any existing Package references are left unchanged. + /// A list of name-value pairs associated with the Pool as metadata. If this element is present, it replaces any existing metadata configured on the Pool. 
If you specify an empty collection, any metadata is removed from the Pool. If omitted, any existing metadata is left unchanged. + /// The desired node communication mode for the pool. If this element is present, it replaces the existing targetNodeCommunicationMode configured on the Pool. If omitted, any existing metadata is left unchanged. + /// Keeps track of any properties unknown to the library. + internal BatchPoolUpdateContent(BatchStartTask startTask, IList applicationPackageReferences, IList metadata, BatchNodeCommunicationMode? targetNodeCommunicationMode, IDictionary serializedAdditionalRawData) + { + StartTask = startTask; + ApplicationPackageReferences = applicationPackageReferences; + Metadata = metadata; + TargetNodeCommunicationMode = targetNodeCommunicationMode; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. If this element is present, it overwrites any existing StartTask. If omitted, any existing StartTask is left unchanged. + public BatchStartTask StartTask { get; set; } + /// A list of Packages to be installed on each Compute Node in the Pool. Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. If this element is present, it replaces any existing Package references. If you specify an empty collection, then all Package references are removed from the Pool. If omitted, any existing Package references are left unchanged. + public IList ApplicationPackageReferences { get; } + /// A list of name-value pairs associated with the Pool as metadata. If this element is present, it replaces any existing metadata configured on the Pool. If you specify an empty collection, any metadata is removed from the Pool. If omitted, any existing metadata is left unchanged. + public IList Metadata { get; } + /// The desired node communication mode for the pool. If this element is present, it replaces the existing targetNodeCommunicationMode configured on the Pool. If omitted, any existing metadata is left unchanged. + public BatchNodeCommunicationMode? TargetNodeCommunicationMode { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUsageMetrics.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUsageMetrics.Serialization.cs new file mode 100644 index 0000000000000..baa7f92a088de --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUsageMetrics.Serialization.cs @@ -0,0 +1,173 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchPoolUsageMetrics : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchPoolUsageMetrics)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("poolId"u8); + writer.WriteStringValue(PoolId); + writer.WritePropertyName("startTime"u8); + writer.WriteStringValue(StartTime, "O"); + writer.WritePropertyName("endTime"u8); + writer.WriteStringValue(EndTime, "O"); + writer.WritePropertyName("vmSize"u8); + writer.WriteStringValue(VmSize); + writer.WritePropertyName("totalCoreHours"u8); + writer.WriteNumberValue(TotalCoreHours); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchPoolUsageMetrics IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchPoolUsageMetrics)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchPoolUsageMetrics(document.RootElement, options); + } + + internal static BatchPoolUsageMetrics DeserializeBatchPoolUsageMetrics(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string poolId = default; + DateTimeOffset startTime = default; + DateTimeOffset endTime = default; + string vmSize = default; + float totalCoreHours = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("poolId"u8)) + { + poolId = property.Value.GetString(); + continue; + } + if (property.NameEquals("startTime"u8)) + { + startTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("endTime"u8)) + { + endTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("vmSize"u8)) + { + vmSize = property.Value.GetString(); + continue; + } + if (property.NameEquals("totalCoreHours"u8)) + { + totalCoreHours = property.Value.GetSingle(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchPoolUsageMetrics( + poolId, + startTime, + endTime, + vmSize, + totalCoreHours, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchPoolUsageMetrics)} does not support writing '{options.Format}' format."); + } + } + + BatchPoolUsageMetrics IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchPoolUsageMetrics(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchPoolUsageMetrics)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchPoolUsageMetrics FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchPoolUsageMetrics(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUsageMetrics.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUsageMetrics.cs new file mode 100644 index 0000000000000..c28463c079071 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUsageMetrics.cs @@ -0,0 +1,100 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Usage metrics for a Pool across an aggregation interval. + public partial class BatchPoolUsageMetrics + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The ID of the Pool whose metrics are aggregated in this entry. + /// The start time of the aggregation interval covered by this entry. + /// The end time of the aggregation interval covered by this entry. + /// The size of virtual machines in the Pool. All VMs in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + /// The total core hours used in the Pool during this aggregation interval. + /// or is null. 
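Reviewer note (illustrative): each `BatchPoolUsageMetrics` entry covers one aggregation interval for one pool, so account-level reporting reduces to a LINQ rollup. A hypothetical helper, assuming `metrics` entries were obtained from a pool-usage listing:

```csharp
using System.Collections.Generic;
using System.Linq;
using Azure.Compute.Batch;

static class PoolUsageRollup
{
    // Sums TotalCoreHours across aggregation intervals, grouped by pool.
    public static Dictionary<string, float> CoreHoursByPool(IEnumerable<BatchPoolUsageMetrics> metrics) =>
        metrics.GroupBy(m => m.PoolId)
               .ToDictionary(g => g.Key, g => g.Sum(m => m.TotalCoreHours));
}
```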
+ internal BatchPoolUsageMetrics(string poolId, DateTimeOffset startTime, DateTimeOffset endTime, string vmSize, float totalCoreHours) + { + Argument.AssertNotNull(poolId, nameof(poolId)); + Argument.AssertNotNull(vmSize, nameof(vmSize)); + + PoolId = poolId; + StartTime = startTime; + EndTime = endTime; + VmSize = vmSize; + TotalCoreHours = totalCoreHours; + } + + /// Initializes a new instance of . + /// The ID of the Pool whose metrics are aggregated in this entry. + /// The start time of the aggregation interval covered by this entry. + /// The end time of the aggregation interval covered by this entry. + /// The size of virtual machines in the Pool. All VMs in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + /// The total core hours used in the Pool during this aggregation interval. + /// Keeps track of any properties unknown to the library. + internal BatchPoolUsageMetrics(string poolId, DateTimeOffset startTime, DateTimeOffset endTime, string vmSize, float totalCoreHours, IDictionary serializedAdditionalRawData) + { + PoolId = poolId; + StartTime = startTime; + EndTime = endTime; + VmSize = vmSize; + TotalCoreHours = totalCoreHours; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchPoolUsageMetrics() + { + } + + /// The ID of the Pool whose metrics are aggregated in this entry. + public string PoolId { get; } + /// The start time of the aggregation interval covered by this entry. + public DateTimeOffset StartTime { get; } + /// The end time of the aggregation interval covered by this entry. + public DateTimeOffset EndTime { get; } + /// The size of virtual machines in the Pool. All VMs in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + public string VmSize { get; } + /// The total core hours used in the Pool during this aggregation interval. + public float TotalCoreHours { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUsageStatistics.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUsageStatistics.Serialization.cs new file mode 100644 index 0000000000000..3643fcc75cebf --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUsageStatistics.Serialization.cs @@ -0,0 +1,151 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchPoolUsageStatistics : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchPoolUsageStatistics)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("startTime"u8); + writer.WriteStringValue(StartTime, "O"); + writer.WritePropertyName("lastUpdateTime"u8); + writer.WriteStringValue(LastUpdateTime, "O"); + writer.WritePropertyName("dedicatedCoreTime"u8); + writer.WriteStringValue(DedicatedCoreTime, "P"); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchPoolUsageStatistics IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchPoolUsageStatistics)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchPoolUsageStatistics(document.RootElement, options); + } + + internal static BatchPoolUsageStatistics DeserializeBatchPoolUsageStatistics(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + DateTimeOffset startTime = default; + DateTimeOffset lastUpdateTime = default; + TimeSpan dedicatedCoreTime = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("startTime"u8)) + { + startTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("lastUpdateTime"u8)) + { + lastUpdateTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("dedicatedCoreTime"u8)) + { + dedicatedCoreTime = property.Value.GetTimeSpan("P"); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchPoolUsageStatistics(startTime, lastUpdateTime, dedicatedCoreTime, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchPoolUsageStatistics)} does not support writing '{options.Format}' format."); + } + } + + BatchPoolUsageStatistics IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchPoolUsageStatistics(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchPoolUsageStatistics)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchPoolUsageStatistics FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchPoolUsageStatistics(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUsageStatistics.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUsageStatistics.cs new file mode 100644 index 0000000000000..7912c9f525105 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUsageStatistics.cs @@ -0,0 +1,84 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Statistics related to Pool usage information. + public partial class BatchPoolUsageStatistics + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The start time of the time range covered by the statistics. + /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. + /// The aggregated wall-clock time of the dedicated Compute Node cores being part of the Pool. + internal BatchPoolUsageStatistics(DateTimeOffset startTime, DateTimeOffset lastUpdateTime, TimeSpan dedicatedCoreTime) + { + StartTime = startTime; + LastUpdateTime = lastUpdateTime; + DedicatedCoreTime = dedicatedCoreTime; + } + + /// Initializes a new instance of . + /// The start time of the time range covered by the statistics. + /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. + /// The aggregated wall-clock time of the dedicated Compute Node cores being part of the Pool. + /// Keeps track of any properties unknown to the library. 
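Reviewer note (illustrative): `DedicatedCoreTime` is aggregated wall-clock core time, so dividing it by the statistics window yields the average number of dedicated cores in use. A minimal sketch (hypothetical helper):

```csharp
using System;
using Azure.Compute.Batch;

static class PoolUsageMath
{
    // Average dedicated cores = aggregated core time / statistics window.
    public static double AverageDedicatedCores(BatchPoolUsageStatistics usage)
    {
        TimeSpan window = usage.LastUpdateTime - usage.StartTime;
        return window > TimeSpan.Zero
            ? usage.DedicatedCoreTime.TotalHours / window.TotalHours
            : 0;
    }
}
```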
+ internal BatchPoolUsageStatistics(DateTimeOffset startTime, DateTimeOffset lastUpdateTime, TimeSpan dedicatedCoreTime, IDictionary serializedAdditionalRawData) + { + StartTime = startTime; + LastUpdateTime = lastUpdateTime; + DedicatedCoreTime = dedicatedCoreTime; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchPoolUsageStatistics() + { + } + + /// The start time of the time range covered by the statistics. + public DateTimeOffset StartTime { get; } + /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. + public DateTimeOffset LastUpdateTime { get; } + /// The aggregated wall-clock time of the dedicated Compute Node cores being part of the Pool. + public TimeSpan DedicatedCoreTime { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchStartTask.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchStartTask.Serialization.cs new file mode 100644 index 0000000000000..d2d681b9cd1df --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchStartTask.Serialization.cs @@ -0,0 +1,253 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchStartTask : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchStartTask)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("commandLine"u8); + writer.WriteStringValue(CommandLine); + if (Optional.IsDefined(ContainerSettings)) + { + writer.WritePropertyName("containerSettings"u8); + writer.WriteObjectValue(ContainerSettings, options); + } + if (Optional.IsCollectionDefined(ResourceFiles)) + { + writer.WritePropertyName("resourceFiles"u8); + writer.WriteStartArray(); + foreach (var item in ResourceFiles) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (Optional.IsCollectionDefined(EnvironmentSettings)) + { + writer.WritePropertyName("environmentSettings"u8); + writer.WriteStartArray(); + foreach (var item in EnvironmentSettings) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (Optional.IsDefined(UserIdentity)) + { + writer.WritePropertyName("userIdentity"u8); + writer.WriteObjectValue(UserIdentity, options); + } + if (Optional.IsDefined(MaxTaskRetryCount)) + { + writer.WritePropertyName("maxTaskRetryCount"u8); + writer.WriteNumberValue(MaxTaskRetryCount.Value); + } + if (Optional.IsDefined(WaitForSuccess)) + { + writer.WritePropertyName("waitForSuccess"u8); + writer.WriteBooleanValue(WaitForSuccess.Value); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchStartTask IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchStartTask)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchStartTask(document.RootElement, options); + } + + internal static BatchStartTask DeserializeBatchStartTask(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string commandLine = default; + BatchTaskContainerSettings containerSettings = default; + IList resourceFiles = default; + IList environmentSettings = default; + UserIdentity userIdentity = default; + int? maxTaskRetryCount = default; + bool? 
waitForSuccess = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("commandLine"u8)) + { + commandLine = property.Value.GetString(); + continue; + } + if (property.NameEquals("containerSettings"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + containerSettings = BatchTaskContainerSettings.DeserializeBatchTaskContainerSettings(property.Value, options); + continue; + } + if (property.NameEquals("resourceFiles"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(ResourceFile.DeserializeResourceFile(item, options)); + } + resourceFiles = array; + continue; + } + if (property.NameEquals("environmentSettings"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(EnvironmentSetting.DeserializeEnvironmentSetting(item, options)); + } + environmentSettings = array; + continue; + } + if (property.NameEquals("userIdentity"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + userIdentity = UserIdentity.DeserializeUserIdentity(property.Value, options); + continue; + } + if (property.NameEquals("maxTaskRetryCount"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + maxTaskRetryCount = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("waitForSuccess"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + waitForSuccess = property.Value.GetBoolean(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchStartTask( + commandLine, + containerSettings, + resourceFiles ?? new ChangeTrackingList(), + environmentSettings ?? new ChangeTrackingList(), + userIdentity, + maxTaskRetryCount, + waitForSuccess, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchStartTask)} does not support writing '{options.Format}' format."); + } + } + + BatchStartTask IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchStartTask(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchStartTask)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. 
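Reviewer note (illustrative): a round trip through the `IJsonModel` implementation above shows the wire shape; only optionals that are set get emitted. The command line and option values are invented:

```csharp
using System;
using System.ClientModel.Primitives;
using Azure.Compute.Batch;

var startTask = new BatchStartTask("/bin/sh -c 'echo ready'")
{
    WaitForSuccess = true,
    MaxTaskRetryCount = 2,
};

// Expected output (no container settings, resource files, etc. were set):
// {"commandLine":"/bin/sh -c 'echo ready'","maxTaskRetryCount":2,"waitForSuccess":true}
BinaryData wire = ModelReaderWriter.Write(startTask);
Console.WriteLine(wire.ToString());
```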
+ internal static BatchStartTask FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchStartTask(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchStartTask.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchStartTask.cs new file mode 100644 index 0000000000000..d83549ff06989 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchStartTask.cs @@ -0,0 +1,115 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// + /// Batch will retry Tasks when a recovery operation is triggered on a Node. + /// Examples of recovery operations include (but are not limited to) when an + /// unhealthy Node is rebooted or a Compute Node disappeared due to host failure. + /// Retries due to recovery operations are independent of and are not counted + /// against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal + /// retry due to a recovery operation may occur. Because of this, all Tasks should + /// be idempotent. This means Tasks need to tolerate being interrupted and + /// restarted without causing any corruption or duplicate data. The best practice + /// for long running Tasks is to use some form of checkpointing. In some cases the + /// StartTask may be re-run even though the Compute Node was not rebooted. Special + /// care should be taken to avoid StartTasks which create breakaway process or + /// install/launch services from the StartTask working directory, as this will + /// block Batch from being able to re-run the StartTask. + /// + public partial class BatchStartTask + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The command line of the StartTask. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + /// is null. 
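Reviewer note (illustrative): per the `maxTaskRetryCount` docs just below, the total attempt count is the initial try plus the retry budget (e.g. a retry count of 3 means up to 4 runs). A hypothetical helper makes the arithmetic explicit:

```csharp
using Azure.Compute.Batch;

static class StartTaskRetryMath
{
    // Total attempts implied by MaxTaskRetryCount per the docs:
    // null/0 => one attempt; -1 => unlimited; n > 0 => 1 + n.
    public static int? MaxAttempts(BatchStartTask task) => task.MaxTaskRetryCount switch
    {
        null or 0 => 1,
        -1 => (int?)null,
        int n => n + 1,
    };
}
```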
+ public BatchStartTask(string commandLine) + { + Argument.AssertNotNull(commandLine, nameof(commandLine)); + + CommandLine = commandLine; + ResourceFiles = new ChangeTrackingList(); + EnvironmentSettings = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// The command line of the StartTask. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + /// The settings for the container under which the StartTask runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. + /// A list of files that the Batch service will download to the Compute Node before running the command line. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. Files listed under this element are located in the Task's working directory. + /// A list of environment variable settings for the StartTask. + /// The user identity under which the StartTask runs. If omitted, the Task runs as a non-administrative user unique to the Task. + /// The maximum number of times the Task may be retried. The Batch service retries a Task if its exit code is nonzero. Note that this value specifically controls the number of retries. The Batch service will try the Task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries the Task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry the Task. If the maximum retry count is -1, the Batch service retries the Task without limit, however this is not recommended for a start task or any task. The default value is 0 (no retries). + /// Whether the Batch service should wait for the StartTask to complete successfully (that is, to exit with exit code 0) before scheduling any Tasks on the Compute Node. If true and the StartTask fails on a Node, the Batch service retries the StartTask up to its maximum retry count (maxTaskRetryCount). If the Task has still not completed successfully after all retries, then the Batch service marks the Node unusable, and will not schedule Tasks to it. This condition can be detected via the Compute Node state and failure info details. If false, the Batch service will not wait for the StartTask to complete. 
In this case, other Tasks can start executing on the Compute Node while the StartTask is still running; and even if the StartTask fails, new Tasks will continue to be scheduled on the Compute Node. The default is true. + /// Keeps track of any properties unknown to the library. + internal BatchStartTask(string commandLine, BatchTaskContainerSettings containerSettings, IList resourceFiles, IList environmentSettings, UserIdentity userIdentity, int? maxTaskRetryCount, bool? waitForSuccess, IDictionary serializedAdditionalRawData) + { + CommandLine = commandLine; + ContainerSettings = containerSettings; + ResourceFiles = resourceFiles; + EnvironmentSettings = environmentSettings; + UserIdentity = userIdentity; + MaxTaskRetryCount = maxTaskRetryCount; + WaitForSuccess = waitForSuccess; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchStartTask() + { + } + + /// The command line of the StartTask. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + public string CommandLine { get; set; } + /// The settings for the container under which the StartTask runs. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. + public BatchTaskContainerSettings ContainerSettings { get; set; } + /// A list of files that the Batch service will download to the Compute Node before running the command line. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. Files listed under this element are located in the Task's working directory. + public IList ResourceFiles { get; } + /// A list of environment variable settings for the StartTask. + public IList EnvironmentSettings { get; } + /// The user identity under which the StartTask runs. If omitted, the Task runs as a non-administrative user unique to the Task. + public UserIdentity UserIdentity { get; set; } + /// The maximum number of times the Task may be retried. The Batch service retries a Task if its exit code is nonzero. Note that this value specifically controls the number of retries. The Batch service will try the Task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries the Task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry the Task. 
If the maximum retry count is -1, the Batch service retries the Task without limit, however this is not recommended for a start task or any task. The default value is 0 (no retries). + public int? MaxTaskRetryCount { get; set; } + /// Whether the Batch service should wait for the StartTask to complete successfully (that is, to exit with exit code 0) before scheduling any Tasks on the Compute Node. If true and the StartTask fails on a Node, the Batch service retries the StartTask up to its maximum retry count (maxTaskRetryCount). If the Task has still not completed successfully after all retries, then the Batch service marks the Node unusable, and will not schedule Tasks to it. This condition can be detected via the Compute Node state and failure info details. If false, the Batch service will not wait for the StartTask to complete. In this case, other Tasks can start executing on the Compute Node while the StartTask is still running; and even if the StartTask fails, new Tasks will continue to be scheduled on the Compute Node. The default is true. + public bool? WaitForSuccess { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchStartTaskInfo.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchStartTaskInfo.Serialization.cs new file mode 100644 index 0000000000000..6f0dd8e100dfc --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchStartTaskInfo.Serialization.cs @@ -0,0 +1,251 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchStartTaskInfo : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchStartTaskInfo)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("state"u8); + writer.WriteStringValue(State.ToString()); + writer.WritePropertyName("startTime"u8); + writer.WriteStringValue(StartTime, "O"); + if (Optional.IsDefined(EndTime)) + { + writer.WritePropertyName("endTime"u8); + writer.WriteStringValue(EndTime.Value, "O"); + } + if (Optional.IsDefined(ExitCode)) + { + writer.WritePropertyName("exitCode"u8); + writer.WriteNumberValue(ExitCode.Value); + } + if (Optional.IsDefined(ContainerInfo)) + { + writer.WritePropertyName("containerInfo"u8); + writer.WriteObjectValue(ContainerInfo, options); + } + if (Optional.IsDefined(FailureInfo)) + { + writer.WritePropertyName("failureInfo"u8); + writer.WriteObjectValue(FailureInfo, options); + } + writer.WritePropertyName("retryCount"u8); + writer.WriteNumberValue(RetryCount); + if (Optional.IsDefined(LastRetryTime)) + { + writer.WritePropertyName("lastRetryTime"u8); + writer.WriteStringValue(LastRetryTime.Value, "O"); + } + if (Optional.IsDefined(Result)) + { + writer.WritePropertyName("result"u8); + writer.WriteStringValue(Result.Value.ToString()); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchStartTaskInfo IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchStartTaskInfo)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchStartTaskInfo(document.RootElement, options); + } + + internal static BatchStartTaskInfo DeserializeBatchStartTaskInfo(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + BatchStartTaskState state = default; + DateTimeOffset startTime = default; + DateTimeOffset? endTime = default; + int? exitCode = default; + BatchTaskContainerExecutionInfo containerInfo = default; + BatchTaskFailureInfo failureInfo = default; + int retryCount = default; + DateTimeOffset? lastRetryTime = default; + BatchTaskExecutionResult? 
result = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("state"u8)) + { + state = new BatchStartTaskState(property.Value.GetString()); + continue; + } + if (property.NameEquals("startTime"u8)) + { + startTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("endTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + endTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("exitCode"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + exitCode = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("containerInfo"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + containerInfo = BatchTaskContainerExecutionInfo.DeserializeBatchTaskContainerExecutionInfo(property.Value, options); + continue; + } + if (property.NameEquals("failureInfo"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + failureInfo = BatchTaskFailureInfo.DeserializeBatchTaskFailureInfo(property.Value, options); + continue; + } + if (property.NameEquals("retryCount"u8)) + { + retryCount = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("lastRetryTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + lastRetryTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("result"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + result = new BatchTaskExecutionResult(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchStartTaskInfo( + state, + startTime, + endTime, + exitCode, + containerInfo, + failureInfo, + retryCount, + lastRetryTime, + result, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchStartTaskInfo)} does not support writing '{options.Format}' format."); + } + } + + BatchStartTaskInfo IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchStartTaskInfo(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchStartTaskInfo)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchStartTaskInfo FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchStartTaskInfo(document.RootElement); + } + + /// Convert into a . 
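+        // Illustrative sketch (not part of the generated file): the IJsonModel/
+        // IPersistableModel implementation above lets callers round-trip this model
+        // through JSON via System.ClientModel's ModelReaderWriter; "info" is a
+        // hypothetical instance obtained from the service.
+        //
+        //   BinaryData json = ModelReaderWriter.Write(info);
+        //   BatchStartTaskInfo roundTripped = ModelReaderWriter.Read<BatchStartTaskInfo>(json);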
+ internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchStartTaskInfo.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchStartTaskInfo.cs new file mode 100644 index 0000000000000..d364c0c8160f7 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchStartTaskInfo.cs @@ -0,0 +1,108 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Information about a StartTask running on a Compute Node. + public partial class BatchStartTaskInfo + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The state of the StartTask on the Compute Node. + /// The time at which the StartTask started running. This value is reset every time the Task is restarted or retried (that is, this is the most recent time at which the StartTask started running). + /// The number of times the Task has been retried by the Batch service. Task application failures (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file upload errors are not retried. The Batch service will retry the Task up to the limit specified by the constraints. + internal BatchStartTaskInfo(BatchStartTaskState state, DateTimeOffset startTime, int retryCount) + { + State = state; + StartTime = startTime; + RetryCount = retryCount; + } + + /// Initializes a new instance of . + /// The state of the StartTask on the Compute Node. + /// The time at which the StartTask started running. This value is reset every time the Task is restarted or retried (that is, this is the most recent time at which the StartTask started running). + /// The time at which the StartTask stopped running. This is the end time of the most recent run of the StartTask, if that run has completed (even if that run failed and a retry is pending). This element is not present if the StartTask is currently running. + /// The exit code of the program specified on the StartTask command line. This property is set only if the StartTask is in the completed state. In general, the exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. However, if the Batch service terminates the StartTask (due to timeout, or user termination via the API) you may see an operating system-defined exit code. + /// Information about the container under which the Task is executing. 
This property is set only if the Task runs in a container context. + /// Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. + /// The number of times the Task has been retried by the Batch service. Task application failures (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file upload errors are not retried. The Batch service will retry the Task up to the limit specified by the constraints. + /// The most recent time at which a retry of the Task started running. This element is present only if the Task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the Task has been restarted for reasons other than retry; for example, if the Compute Node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not. + /// The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. + /// Keeps track of any properties unknown to the library. + internal BatchStartTaskInfo(BatchStartTaskState state, DateTimeOffset startTime, DateTimeOffset? endTime, int? exitCode, BatchTaskContainerExecutionInfo containerInfo, BatchTaskFailureInfo failureInfo, int retryCount, DateTimeOffset? lastRetryTime, BatchTaskExecutionResult? result, IDictionary serializedAdditionalRawData) + { + State = state; + StartTime = startTime; + EndTime = endTime; + ExitCode = exitCode; + ContainerInfo = containerInfo; + FailureInfo = failureInfo; + RetryCount = retryCount; + LastRetryTime = lastRetryTime; + Result = result; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchStartTaskInfo() + { + } + + /// The state of the StartTask on the Compute Node. + public BatchStartTaskState State { get; } + /// The time at which the StartTask started running. This value is reset every time the Task is restarted or retried (that is, this is the most recent time at which the StartTask started running). + public DateTimeOffset StartTime { get; } + /// The time at which the StartTask stopped running. This is the end time of the most recent run of the StartTask, if that run has completed (even if that run failed and a retry is pending). This element is not present if the StartTask is currently running. + public DateTimeOffset? EndTime { get; } + /// The exit code of the program specified on the StartTask command line. This property is set only if the StartTask is in the completed state. In general, the exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. However, if the Batch service terminates the StartTask (due to timeout, or user termination via the API) you may see an operating system-defined exit code. + public int? ExitCode { get; } + /// Information about the container under which the Task is executing. This property is set only if the Task runs in a container context. + public BatchTaskContainerExecutionInfo ContainerInfo { get; } + /// Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. 
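+        // Illustrative sketch (not generated code): these read-only properties are the
+        // usual way to diagnose a StartTask; "info" is a hypothetical BatchStartTaskInfo
+        // retrieved from a Compute Node.
+        //
+        //   if (info.State == BatchStartTaskState.Completed && info.ExitCode != 0)
+        //   {
+        //       Console.WriteLine($"StartTask failed after {info.RetryCount} retries: {info.FailureInfo}");
+        //   }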
+        public BatchTaskFailureInfo FailureInfo { get; }
+        /// The number of times the Task has been retried by the Batch service. Task application failures (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file upload errors are not retried. The Batch service will retry the Task up to the limit specified by the constraints.
+        public int RetryCount { get; }
+        /// The most recent time at which a retry of the Task started running. This element is present only if the Task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the Task has been restarted for reasons other than retry; for example, if the Compute Node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not.
+        public DateTimeOffset? LastRetryTime { get; }
+        /// The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property.
+        public BatchTaskExecutionResult? Result { get; }
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchStartTaskState.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchStartTaskState.cs
new file mode 100644
index 0000000000000..24a34f0db0384
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchStartTaskState.cs
@@ -0,0 +1,51 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.ComponentModel;
+
+namespace Azure.Compute.Batch
+{
+    /// <summary> BatchStartTaskState enums. </summary>
+    public readonly partial struct BatchStartTaskState : IEquatable<BatchStartTaskState>
+    {
+        private readonly string _value;
+
+        /// <summary> Initializes a new instance of <see cref="BatchStartTaskState"/>. </summary>
+        /// <exception cref="ArgumentNullException"> <paramref name="value"/> is null. </exception>
+        public BatchStartTaskState(string value)
+        {
+            _value = value ?? throw new ArgumentNullException(nameof(value));
+        }
+
+        private const string RunningValue = "running";
+        private const string CompletedValue = "completed";
+
+        /// <summary> The StartTask is currently running. </summary>
+        public static BatchStartTaskState Running { get; } = new BatchStartTaskState(RunningValue);
+        /// <summary> The StartTask has exited with exit code 0, or the StartTask has failed and the retry limit has been reached, or the StartTask process did not run due to Task preparation errors (such as resource file download failures). </summary>
+        public static BatchStartTaskState Completed { get; } = new BatchStartTaskState(CompletedValue);
+        /// <summary> Determines if two <see cref="BatchStartTaskState"/> values are the same. </summary>
+        public static bool operator ==(BatchStartTaskState left, BatchStartTaskState right) => left.Equals(right);
+        /// <summary> Determines if two <see cref="BatchStartTaskState"/> values are not the same. </summary>
+        public static bool operator !=(BatchStartTaskState left, BatchStartTaskState right) => !left.Equals(right);
+        /// <summary> Converts a string to a <see cref="BatchStartTaskState"/>. </summary>
+        public static implicit operator BatchStartTaskState(string value) => new BatchStartTaskState(value);
+
+        /// <inheritdoc />
+        [EditorBrowsable(EditorBrowsableState.Never)]
+        public override bool Equals(object obj) => obj is BatchStartTaskState other && Equals(other);
+        /// <inheritdoc />
+        public bool Equals(BatchStartTaskState other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase);
+
+        /// <inheritdoc />
+        [EditorBrowsable(EditorBrowsableState.Never)]
+        public override int GetHashCode() => _value?.GetHashCode() ??
0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchSubtask.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchSubtask.Serialization.cs new file mode 100644 index 0000000000000..a5fc546e0d6b1 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchSubtask.Serialization.cs @@ -0,0 +1,320 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchSubtask : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchSubtask)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(Id)) + { + writer.WritePropertyName("id"u8); + writer.WriteNumberValue(Id.Value); + } + if (Optional.IsDefined(NodeInfo)) + { + writer.WritePropertyName("nodeInfo"u8); + writer.WriteObjectValue(NodeInfo, options); + } + if (Optional.IsDefined(StartTime)) + { + writer.WritePropertyName("startTime"u8); + writer.WriteStringValue(StartTime.Value, "O"); + } + if (Optional.IsDefined(EndTime)) + { + writer.WritePropertyName("endTime"u8); + writer.WriteStringValue(EndTime.Value, "O"); + } + if (Optional.IsDefined(ExitCode)) + { + writer.WritePropertyName("exitCode"u8); + writer.WriteNumberValue(ExitCode.Value); + } + if (Optional.IsDefined(ContainerInfo)) + { + writer.WritePropertyName("containerInfo"u8); + writer.WriteObjectValue(ContainerInfo, options); + } + if (Optional.IsDefined(FailureInfo)) + { + writer.WritePropertyName("failureInfo"u8); + writer.WriteObjectValue(FailureInfo, options); + } + if (Optional.IsDefined(State)) + { + writer.WritePropertyName("state"u8); + writer.WriteStringValue(State.Value.ToString()); + } + if (Optional.IsDefined(StateTransitionTime)) + { + writer.WritePropertyName("stateTransitionTime"u8); + writer.WriteStringValue(StateTransitionTime.Value, "O"); + } + if (Optional.IsDefined(PreviousState)) + { + writer.WritePropertyName("previousState"u8); + writer.WriteStringValue(PreviousState.Value.ToString()); + } + if (Optional.IsDefined(PreviousStateTransitionTime)) + { + writer.WritePropertyName("previousStateTransitionTime"u8); + writer.WriteStringValue(PreviousStateTransitionTime.Value, "O"); + } + if (Optional.IsDefined(Result)) + { + writer.WritePropertyName("result"u8); + writer.WriteStringValue(Result.Value.ToString()); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchSubtask IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchSubtask)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchSubtask(document.RootElement, options); + } + + internal static BatchSubtask DeserializeBatchSubtask(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + int? id = default; + BatchNodeInfo nodeInfo = default; + DateTimeOffset? startTime = default; + DateTimeOffset? endTime = default; + int? exitCode = default; + BatchTaskContainerExecutionInfo containerInfo = default; + BatchTaskFailureInfo failureInfo = default; + BatchSubtaskState? state = default; + DateTimeOffset? stateTransitionTime = default; + BatchSubtaskState? previousState = default; + DateTimeOffset? previousStateTransitionTime = default; + BatchTaskExecutionResult? result = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + id = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("nodeInfo"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + nodeInfo = BatchNodeInfo.DeserializeBatchNodeInfo(property.Value, options); + continue; + } + if (property.NameEquals("startTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + startTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("endTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + endTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("exitCode"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + exitCode = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("containerInfo"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + containerInfo = BatchTaskContainerExecutionInfo.DeserializeBatchTaskContainerExecutionInfo(property.Value, options); + continue; + } + if (property.NameEquals("failureInfo"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + failureInfo = BatchTaskFailureInfo.DeserializeBatchTaskFailureInfo(property.Value, options); + continue; + } + if (property.NameEquals("state"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + state = new BatchSubtaskState(property.Value.GetString()); + continue; + } + if (property.NameEquals("stateTransitionTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + stateTransitionTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("previousState"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + previousState = new BatchSubtaskState(property.Value.GetString()); + continue; + } + if (property.NameEquals("previousStateTransitionTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + previousStateTransitionTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if 
(property.NameEquals("result"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + result = new BatchTaskExecutionResult(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchSubtask( + id, + nodeInfo, + startTime, + endTime, + exitCode, + containerInfo, + failureInfo, + state, + stateTransitionTime, + previousState, + previousStateTransitionTime, + result, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchSubtask)} does not support writing '{options.Format}' format."); + } + } + + BatchSubtask IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchSubtask(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchSubtask)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchSubtask FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchSubtask(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchSubtask.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchSubtask.cs new file mode 100644 index 0000000000000..39c971bd37fbf --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchSubtask.cs @@ -0,0 +1,109 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Information about an Azure Batch subtask. + public partial class BatchSubtask + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . 
+ internal BatchSubtask() + { + } + + /// Initializes a new instance of . + /// The ID of the subtask. + /// Information about the Compute Node on which the subtask ran. + /// The time at which the subtask started running. If the subtask has been restarted or retried, this is the most recent time at which the subtask started running. + /// The time at which the subtask completed. This property is set only if the subtask is in the Completed state. + /// The exit code of the program specified on the subtask command line. This property is set only if the subtask is in the completed state. In general, the exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. However, if the Batch service terminates the subtask (due to timeout, or user termination via the API) you may see an operating system-defined exit code. + /// Information about the container under which the Task is executing. This property is set only if the Task runs in a container context. + /// Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. + /// The current state of the subtask. + /// The time at which the subtask entered its current state. + /// The previous state of the subtask. This property is not set if the subtask is in its initial running state. + /// The time at which the subtask entered its previous state. This property is not set if the subtask is in its initial running state. + /// The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. + /// Keeps track of any properties unknown to the library. + internal BatchSubtask(int? id, BatchNodeInfo nodeInfo, DateTimeOffset? startTime, DateTimeOffset? endTime, int? exitCode, BatchTaskContainerExecutionInfo containerInfo, BatchTaskFailureInfo failureInfo, BatchSubtaskState? state, DateTimeOffset? stateTransitionTime, BatchSubtaskState? previousState, DateTimeOffset? previousStateTransitionTime, BatchTaskExecutionResult? result, IDictionary serializedAdditionalRawData) + { + Id = id; + NodeInfo = nodeInfo; + StartTime = startTime; + EndTime = endTime; + ExitCode = exitCode; + ContainerInfo = containerInfo; + FailureInfo = failureInfo; + State = state; + StateTransitionTime = stateTransitionTime; + PreviousState = previousState; + PreviousStateTransitionTime = previousStateTransitionTime; + Result = result; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The ID of the subtask. + public int? Id { get; } + /// Information about the Compute Node on which the subtask ran. + public BatchNodeInfo NodeInfo { get; } + /// The time at which the subtask started running. If the subtask has been restarted or retried, this is the most recent time at which the subtask started running. + public DateTimeOffset? StartTime { get; } + /// The time at which the subtask completed. This property is set only if the subtask is in the Completed state. + public DateTimeOffset? EndTime { get; } + /// The exit code of the program specified on the subtask command line. This property is set only if the subtask is in the completed state. In general, the exit code for a process reflects the specific convention implemented by the application developer for that process. 
If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. However, if the Batch service terminates the subtask (due to timeout, or user termination via the API) you may see an operating system-defined exit code. + public int? ExitCode { get; } + /// Information about the container under which the Task is executing. This property is set only if the Task runs in a container context. + public BatchTaskContainerExecutionInfo ContainerInfo { get; } + /// Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. + public BatchTaskFailureInfo FailureInfo { get; } + /// The current state of the subtask. + public BatchSubtaskState? State { get; } + /// The time at which the subtask entered its current state. + public DateTimeOffset? StateTransitionTime { get; } + /// The previous state of the subtask. This property is not set if the subtask is in its initial running state. + public BatchSubtaskState? PreviousState { get; } + /// The time at which the subtask entered its previous state. This property is not set if the subtask is in its initial running state. + public DateTimeOffset? PreviousStateTransitionTime { get; } + /// The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. + public BatchTaskExecutionResult? Result { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchSubtaskState.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchSubtaskState.cs new file mode 100644 index 0000000000000..55ecc85c4f03d --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchSubtaskState.cs @@ -0,0 +1,54 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// BatchSubtaskState enums. + public readonly partial struct BatchSubtaskState : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public BatchSubtaskState(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string PreparingValue = "preparing"; + private const string RunningValue = "running"; + private const string CompletedValue = "completed"; + + /// The Task has been assigned to a Compute Node, but is waiting for a required Job Preparation Task to complete on the Compute Node. If the Job Preparation Task succeeds, the Task will move to running. If the Job Preparation Task fails, the Task will return to active and will be eligible to be assigned to a different Compute Node. + public static BatchSubtaskState Preparing { get; } = new BatchSubtaskState(PreparingValue); + /// The Task is running on a Compute Node. This includes task-level preparation such as downloading resource files or deploying Packages specified on the Task - it does not necessarily mean that the Task command line has started executing. + public static BatchSubtaskState Running { get; } = new BatchSubtaskState(RunningValue); + /// The Task is no longer eligible to run, usually because the Task has finished successfully, or the Task has finished unsuccessfully and has exhausted its retry limit. A Task is also marked as completed if an error occurred launching the Task, or when the Task has been terminated. 
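+        // Illustrative sketch (not generated code): these state structs are "extensible
+        // enums"; unknown service values still round-trip, strings convert implicitly,
+        // and comparison is case-insensitive per the Equals overload below.
+        //
+        //   BatchSubtaskState state = "COMPLETED";            // implicit conversion
+        //   bool done = state == BatchSubtaskState.Completed; // true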
+ public static BatchSubtaskState Completed { get; } = new BatchSubtaskState(CompletedValue); + /// Determines if two values are the same. + public static bool operator ==(BatchSubtaskState left, BatchSubtaskState right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(BatchSubtaskState left, BatchSubtaskState right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator BatchSubtaskState(string value) => new BatchSubtaskState(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is BatchSubtaskState other && Equals(other); + /// + public bool Equals(BatchSubtaskState other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchSupportedImage.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchSupportedImage.Serialization.cs new file mode 100644 index 0000000000000..18ce9f3f0a497 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchSupportedImage.Serialization.cs @@ -0,0 +1,206 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchSupportedImage : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchSupportedImage)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("nodeAgentSKUId"u8); + writer.WriteStringValue(NodeAgentSkuId); + writer.WritePropertyName("imageReference"u8); + writer.WriteObjectValue(ImageReference, options); + writer.WritePropertyName("osType"u8); + writer.WriteStringValue(OsType.ToString()); + if (Optional.IsCollectionDefined(Capabilities)) + { + writer.WritePropertyName("capabilities"u8); + writer.WriteStartArray(); + foreach (var item in Capabilities) + { + writer.WriteStringValue(item); + } + writer.WriteEndArray(); + } + if (Optional.IsDefined(BatchSupportEndOfLife)) + { + writer.WritePropertyName("batchSupportEndOfLife"u8); + writer.WriteStringValue(BatchSupportEndOfLife.Value, "O"); + } + writer.WritePropertyName("verificationType"u8); + writer.WriteStringValue(VerificationType.ToString()); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchSupportedImage IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchSupportedImage)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchSupportedImage(document.RootElement, options); + } + + internal static BatchSupportedImage DeserializeBatchSupportedImage(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string nodeAgentSKUId = default; + ImageReference imageReference = default; + OSType osType = default; + IReadOnlyList capabilities = default; + DateTimeOffset? 
batchSupportEndOfLife = default; + ImageVerificationType verificationType = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("nodeAgentSKUId"u8)) + { + nodeAgentSKUId = property.Value.GetString(); + continue; + } + if (property.NameEquals("imageReference"u8)) + { + imageReference = ImageReference.DeserializeImageReference(property.Value, options); + continue; + } + if (property.NameEquals("osType"u8)) + { + osType = new OSType(property.Value.GetString()); + continue; + } + if (property.NameEquals("capabilities"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(item.GetString()); + } + capabilities = array; + continue; + } + if (property.NameEquals("batchSupportEndOfLife"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + batchSupportEndOfLife = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("verificationType"u8)) + { + verificationType = new ImageVerificationType(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchSupportedImage( + nodeAgentSKUId, + imageReference, + osType, + capabilities ?? new ChangeTrackingList(), + batchSupportEndOfLife, + verificationType, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchSupportedImage)} does not support writing '{options.Format}' format."); + } + } + + BatchSupportedImage IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchSupportedImage(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchSupportedImage)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchSupportedImage FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchSupportedImage(document.RootElement); + } + + /// Convert into a . 
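+        // Illustrative sketch (not generated code): FromResponse above is how
+        // convenience methods materialize this model from a raw protocol-layer call;
+        // "response" is a hypothetical Response returned by such a call.
+        //
+        //   BatchSupportedImage image = BatchSupportedImage.FromResponse(response);
+        //   Console.WriteLine($"{image.NodeAgentSkuId} ({image.OsType}): {image.VerificationType}");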
+ internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchSupportedImage.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchSupportedImage.cs new file mode 100644 index 0000000000000..dcc24fa9d8e86 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchSupportedImage.cs @@ -0,0 +1,106 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// + /// A reference to the Azure Virtual Machines Marketplace Image and additional + /// information about the Image. + /// + public partial class BatchSupportedImage + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The ID of the Compute Node agent SKU which the Image supports. + /// The reference to the Azure Virtual Machine's Marketplace Image. + /// The type of operating system (e.g. Windows or Linux) of the Image. + /// Whether the Azure Batch service actively verifies that the Image is compatible with the associated Compute Node agent SKU. + /// or is null. + internal BatchSupportedImage(string nodeAgentSkuId, ImageReference imageReference, OSType osType, ImageVerificationType verificationType) + { + Argument.AssertNotNull(nodeAgentSkuId, nameof(nodeAgentSkuId)); + Argument.AssertNotNull(imageReference, nameof(imageReference)); + + NodeAgentSkuId = nodeAgentSkuId; + ImageReference = imageReference; + OsType = osType; + Capabilities = new ChangeTrackingList(); + VerificationType = verificationType; + } + + /// Initializes a new instance of . + /// The ID of the Compute Node agent SKU which the Image supports. + /// The reference to the Azure Virtual Machine's Marketplace Image. + /// The type of operating system (e.g. Windows or Linux) of the Image. + /// The capabilities or features which the Image supports. Not every capability of the Image is listed. Capabilities in this list are considered of special interest and are generally related to integration with other features in the Azure Batch service. + /// The time when the Azure Batch service will stop accepting create Pool requests for the Image. + /// Whether the Azure Batch service actively verifies that the Image is compatible with the associated Compute Node agent SKU. + /// Keeps track of any properties unknown to the library. + internal BatchSupportedImage(string nodeAgentSkuId, ImageReference imageReference, OSType osType, IReadOnlyList capabilities, DateTimeOffset? 
batchSupportEndOfLife, ImageVerificationType verificationType, IDictionary serializedAdditionalRawData) + { + NodeAgentSkuId = nodeAgentSkuId; + ImageReference = imageReference; + OsType = osType; + Capabilities = capabilities; + BatchSupportEndOfLife = batchSupportEndOfLife; + VerificationType = verificationType; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchSupportedImage() + { + } + + /// The ID of the Compute Node agent SKU which the Image supports. + public string NodeAgentSkuId { get; } + /// The reference to the Azure Virtual Machine's Marketplace Image. + public ImageReference ImageReference { get; } + /// The type of operating system (e.g. Windows or Linux) of the Image. + public OSType OsType { get; } + /// The capabilities or features which the Image supports. Not every capability of the Image is listed. Capabilities in this list are considered of special interest and are generally related to integration with other features in the Azure Batch service. + public IReadOnlyList Capabilities { get; } + /// The time when the Azure Batch service will stop accepting create Pool requests for the Image. + public DateTimeOffset? BatchSupportEndOfLife { get; } + /// Whether the Azure Batch service actively verifies that the Image is compatible with the associated Compute Node agent SKU. + public ImageVerificationType VerificationType { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTask.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTask.Serialization.cs new file mode 100644 index 0000000000000..1c82899a42e69 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTask.Serialization.cs @@ -0,0 +1,580 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchTask : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTask)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (options.Format != "W" && Optional.IsDefined(Id)) + { + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + } + if (options.Format != "W" && Optional.IsDefined(DisplayName)) + { + writer.WritePropertyName("displayName"u8); + writer.WriteStringValue(DisplayName); + } + if (options.Format != "W" && Optional.IsDefined(Url)) + { + writer.WritePropertyName("url"u8); + writer.WriteStringValue(Url); + } + if (options.Format != "W" && Optional.IsDefined(ETag)) + { + writer.WritePropertyName("eTag"u8); + writer.WriteStringValue(ETag); + } + if (options.Format != "W" && Optional.IsDefined(LastModified)) + { + writer.WritePropertyName("lastModified"u8); + writer.WriteStringValue(LastModified.Value, "O"); + } + if (options.Format != "W" && Optional.IsDefined(CreationTime)) + { + writer.WritePropertyName("creationTime"u8); + writer.WriteStringValue(CreationTime.Value, "O"); + } + if (options.Format != "W" && Optional.IsDefined(ExitConditions)) + { + writer.WritePropertyName("exitConditions"u8); + writer.WriteObjectValue(ExitConditions, options); + } + if (options.Format != "W" && Optional.IsDefined(State)) + { + writer.WritePropertyName("state"u8); + writer.WriteStringValue(State.Value.ToString()); + } + if (options.Format != "W" && Optional.IsDefined(StateTransitionTime)) + { + writer.WritePropertyName("stateTransitionTime"u8); + writer.WriteStringValue(StateTransitionTime.Value, "O"); + } + if (options.Format != "W" && Optional.IsDefined(PreviousState)) + { + writer.WritePropertyName("previousState"u8); + writer.WriteStringValue(PreviousState.Value.ToString()); + } + if (options.Format != "W" && Optional.IsDefined(PreviousStateTransitionTime)) + { + writer.WritePropertyName("previousStateTransitionTime"u8); + writer.WriteStringValue(PreviousStateTransitionTime.Value, "O"); + } + if (options.Format != "W" && Optional.IsDefined(CommandLine)) + { + writer.WritePropertyName("commandLine"u8); + writer.WriteStringValue(CommandLine); + } + if (options.Format != "W" && Optional.IsDefined(ContainerSettings)) + { + writer.WritePropertyName("containerSettings"u8); + writer.WriteObjectValue(ContainerSettings, options); + } + if (options.Format != "W" && Optional.IsCollectionDefined(ResourceFiles)) + { + writer.WritePropertyName("resourceFiles"u8); + writer.WriteStartArray(); + foreach (var item in ResourceFiles) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && Optional.IsCollectionDefined(OutputFiles)) + { + writer.WritePropertyName("outputFiles"u8); + writer.WriteStartArray(); + foreach (var item in OutputFiles) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && Optional.IsCollectionDefined(EnvironmentSettings)) + { + writer.WritePropertyName("environmentSettings"u8); + writer.WriteStartArray(); + foreach (var item in EnvironmentSettings) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && Optional.IsDefined(AffinityInfo)) + { + writer.WritePropertyName("affinityInfo"u8); + writer.WriteObjectValue(AffinityInfo, options); + } + if (Optional.IsDefined(Constraints)) + { + writer.WritePropertyName("constraints"u8); + writer.WriteObjectValue(Constraints, options); + } + if 
(options.Format != "W" && Optional.IsDefined(RequiredSlots)) + { + writer.WritePropertyName("requiredSlots"u8); + writer.WriteNumberValue(RequiredSlots.Value); + } + if (options.Format != "W" && Optional.IsDefined(UserIdentity)) + { + writer.WritePropertyName("userIdentity"u8); + writer.WriteObjectValue(UserIdentity, options); + } + if (options.Format != "W" && Optional.IsDefined(ExecutionInfo)) + { + writer.WritePropertyName("executionInfo"u8); + writer.WriteObjectValue(ExecutionInfo, options); + } + if (options.Format != "W" && Optional.IsDefined(NodeInfo)) + { + writer.WritePropertyName("nodeInfo"u8); + writer.WriteObjectValue(NodeInfo, options); + } + if (options.Format != "W" && Optional.IsDefined(MultiInstanceSettings)) + { + writer.WritePropertyName("multiInstanceSettings"u8); + writer.WriteObjectValue(MultiInstanceSettings, options); + } + if (options.Format != "W" && Optional.IsDefined(Stats)) + { + writer.WritePropertyName("stats"u8); + writer.WriteObjectValue(Stats, options); + } + if (options.Format != "W" && Optional.IsDefined(DependsOn)) + { + writer.WritePropertyName("dependsOn"u8); + writer.WriteObjectValue(DependsOn, options); + } + if (options.Format != "W" && Optional.IsCollectionDefined(ApplicationPackageReferences)) + { + writer.WritePropertyName("applicationPackageReferences"u8); + writer.WriteStartArray(); + foreach (var item in ApplicationPackageReferences) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && Optional.IsDefined(AuthenticationTokenSettings)) + { + writer.WritePropertyName("authenticationTokenSettings"u8); + writer.WriteObjectValue(AuthenticationTokenSettings, options); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchTask IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTask)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchTask(document.RootElement, options); + } + + internal static BatchTask DeserializeBatchTask(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + string displayName = default; + string url = default; + string eTag = default; + DateTimeOffset? lastModified = default; + DateTimeOffset? creationTime = default; + ExitConditions exitConditions = default; + BatchTaskState? state = default; + DateTimeOffset? stateTransitionTime = default; + BatchTaskState? previousState = default; + DateTimeOffset? 
previousStateTransitionTime = default; + string commandLine = default; + BatchTaskContainerSettings containerSettings = default; + IReadOnlyList resourceFiles = default; + IReadOnlyList outputFiles = default; + IReadOnlyList environmentSettings = default; + AffinityInfo affinityInfo = default; + BatchTaskConstraints constraints = default; + int? requiredSlots = default; + UserIdentity userIdentity = default; + BatchTaskExecutionInfo executionInfo = default; + BatchNodeInfo nodeInfo = default; + MultiInstanceSettings multiInstanceSettings = default; + BatchTaskStatistics stats = default; + BatchTaskDependencies dependsOn = default; + IReadOnlyList applicationPackageReferences = default; + AuthenticationTokenSettings authenticationTokenSettings = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("displayName"u8)) + { + displayName = property.Value.GetString(); + continue; + } + if (property.NameEquals("url"u8)) + { + url = property.Value.GetString(); + continue; + } + if (property.NameEquals("eTag"u8)) + { + eTag = property.Value.GetString(); + continue; + } + if (property.NameEquals("lastModified"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + lastModified = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("creationTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + creationTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("exitConditions"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + exitConditions = ExitConditions.DeserializeExitConditions(property.Value, options); + continue; + } + if (property.NameEquals("state"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + state = new BatchTaskState(property.Value.GetString()); + continue; + } + if (property.NameEquals("stateTransitionTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + stateTransitionTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("previousState"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + previousState = new BatchTaskState(property.Value.GetString()); + continue; + } + if (property.NameEquals("previousStateTransitionTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + previousStateTransitionTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("commandLine"u8)) + { + commandLine = property.Value.GetString(); + continue; + } + if (property.NameEquals("containerSettings"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + containerSettings = BatchTaskContainerSettings.DeserializeBatchTaskContainerSettings(property.Value, options); + continue; + } + if (property.NameEquals("resourceFiles"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(ResourceFile.DeserializeResourceFile(item, options)); + } + resourceFiles = array; + continue; + } + if (property.NameEquals("outputFiles"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + 
continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(OutputFile.DeserializeOutputFile(item, options)); + } + outputFiles = array; + continue; + } + if (property.NameEquals("environmentSettings"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(EnvironmentSetting.DeserializeEnvironmentSetting(item, options)); + } + environmentSettings = array; + continue; + } + if (property.NameEquals("affinityInfo"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + affinityInfo = AffinityInfo.DeserializeAffinityInfo(property.Value, options); + continue; + } + if (property.NameEquals("constraints"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + constraints = BatchTaskConstraints.DeserializeBatchTaskConstraints(property.Value, options); + continue; + } + if (property.NameEquals("requiredSlots"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + requiredSlots = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("userIdentity"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + userIdentity = UserIdentity.DeserializeUserIdentity(property.Value, options); + continue; + } + if (property.NameEquals("executionInfo"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + executionInfo = BatchTaskExecutionInfo.DeserializeBatchTaskExecutionInfo(property.Value, options); + continue; + } + if (property.NameEquals("nodeInfo"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + nodeInfo = BatchNodeInfo.DeserializeBatchNodeInfo(property.Value, options); + continue; + } + if (property.NameEquals("multiInstanceSettings"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + multiInstanceSettings = MultiInstanceSettings.DeserializeMultiInstanceSettings(property.Value, options); + continue; + } + if (property.NameEquals("stats"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + stats = BatchTaskStatistics.DeserializeBatchTaskStatistics(property.Value, options); + continue; + } + if (property.NameEquals("dependsOn"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + dependsOn = BatchTaskDependencies.DeserializeBatchTaskDependencies(property.Value, options); + continue; + } + if (property.NameEquals("applicationPackageReferences"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(BatchApplicationPackageReference.DeserializeBatchApplicationPackageReference(item, options)); + } + applicationPackageReferences = array; + continue; + } + if (property.NameEquals("authenticationTokenSettings"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + authenticationTokenSettings = AuthenticationTokenSettings.DeserializeAuthenticationTokenSettings(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchTask( + id, + displayName, + url, + eTag, + lastModified, + creationTime, + exitConditions, + state, + 
stateTransitionTime, + previousState, + previousStateTransitionTime, + commandLine, + containerSettings, + resourceFiles ?? new ChangeTrackingList(), + outputFiles ?? new ChangeTrackingList(), + environmentSettings ?? new ChangeTrackingList(), + affinityInfo, + constraints, + requiredSlots, + userIdentity, + executionInfo, + nodeInfo, + multiInstanceSettings, + stats, + dependsOn, + applicationPackageReferences ?? new ChangeTrackingList(), + authenticationTokenSettings, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchTask)} does not support writing '{options.Format}' format."); + } + } + + BatchTask IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchTask(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchTask)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchTask FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchTask(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTask.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTask.cs new file mode 100644 index 0000000000000..3f38ef61f36d9 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTask.cs @@ -0,0 +1,183 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// + /// Batch will retry Tasks when a recovery operation is triggered on a Node. + /// Examples of recovery operations include (but are not limited to) when an + /// unhealthy Node is rebooted or a Compute Node disappeared due to host failure. + /// Retries due to recovery operations are independent of and are not counted + /// against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal + /// retry due to a recovery operation may occur. Because of this, all Tasks should + /// be idempotent. This means Tasks need to tolerate being interrupted and + /// restarted without causing any corruption or duplicate data. The best practice + /// for long running Tasks is to use some form of checkpointing. + /// + public partial class BatchTask + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . 
+ /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + private IDictionary<string, BinaryData> _serializedAdditionalRawData; + + /// Initializes a new instance of BatchTask. + public BatchTask() + { + ResourceFiles = new ChangeTrackingList<ResourceFile>(); + OutputFiles = new ChangeTrackingList<OutputFile>(); + EnvironmentSettings = new ChangeTrackingList<EnvironmentSetting>(); + ApplicationPackageReferences = new ChangeTrackingList<BatchApplicationPackageReference>(); + } + + /// Initializes a new instance of BatchTask. + /// A string that uniquely identifies the Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. + /// A display name for the Task. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. + /// The URL of the Task. + /// The ETag of the Task. This is an opaque string. You can use it to detect whether the Task has changed between requests. In particular, you can pass the ETag when updating a Task to specify that your changes should take effect only if nobody else has modified the Task in the meantime. + /// The last modified time of the Task. + /// The creation time of the Task. + /// How the Batch service should respond when the Task completes. + /// The current state of the Task. + /// The time at which the Task entered its current state. + /// The previous state of the Task. This property is not set if the Task is in its initial Active state. + /// The time at which the Task entered its previous state. This property is not set if the Task is in its initial Active state. + /// The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + /// The settings for the container under which the Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files.
+ /// A list of files that the Batch service will download to the Compute Node before running the command line. For multi-instance Tasks, the resource files will only be downloaded to the Compute Node on which the primary Task is executed. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. + /// A list of files that the Batch service will upload from the Compute Node after running the command line. For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed. + /// A list of environment variable settings for the Task. + /// A locality hint that can be used by the Batch service to select a Compute Node on which to start the new Task. + /// The execution constraints that apply to this Task. + /// The number of scheduling slots that the Task requires to run. The default is 1. A Task can only be scheduled to run on a compute node if the node has enough free scheduling slots available. For multi-instance Tasks, this must be 1. + /// The user identity under which the Task runs. If omitted, the Task runs as a non-administrative user unique to the Task. + /// Information about the execution of the Task. + /// Information about the Compute Node on which the Task ran. + /// An object that indicates that the Task is a multi-instance Task, and contains information about how to run the multi-instance Task. + /// Resource usage statistics for the Task. + /// The Tasks that this Task depends on. This Task will not be scheduled until all Tasks that it depends on have completed successfully. If any of those Tasks fail and exhaust their retry counts, this Task will never be scheduled. + /// A list of Packages that the Batch service will deploy to the Compute Node before running the command line. Application packages are downloaded and deployed to a shared directory, not the Task working directory. Therefore, if a referenced package is already on the Node, and is up to date, then it is not re-downloaded; the existing copy on the Compute Node is used. If a referenced Package cannot be installed, for example because the package has been deleted or because download failed, the Task fails. + /// The settings for an authentication token that the Task can use to perform Batch service operations. If this property is set, the Batch service provides the Task with an authentication token which can be used to authenticate Batch service operations without requiring an Account access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out using the token depend on the settings. For example, a Task can request Job permissions in order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the Job. + /// Keeps track of any properties unknown to the library. + internal BatchTask(string id, string displayName, string url, string eTag, DateTimeOffset? lastModified, DateTimeOffset? creationTime, ExitConditions exitConditions, BatchTaskState? state, DateTimeOffset? stateTransitionTime, BatchTaskState? previousState, DateTimeOffset? 
previousStateTransitionTime, string commandLine, BatchTaskContainerSettings containerSettings, IReadOnlyList<ResourceFile> resourceFiles, IReadOnlyList<OutputFile> outputFiles, IReadOnlyList<EnvironmentSetting> environmentSettings, AffinityInfo affinityInfo, BatchTaskConstraints constraints, int? requiredSlots, UserIdentity userIdentity, BatchTaskExecutionInfo executionInfo, BatchNodeInfo nodeInfo, MultiInstanceSettings multiInstanceSettings, BatchTaskStatistics stats, BatchTaskDependencies dependsOn, IReadOnlyList<BatchApplicationPackageReference> applicationPackageReferences, AuthenticationTokenSettings authenticationTokenSettings, IDictionary<string, BinaryData> serializedAdditionalRawData) + { + Id = id; + DisplayName = displayName; + Url = url; + ETag = eTag; + LastModified = lastModified; + CreationTime = creationTime; + ExitConditions = exitConditions; + State = state; + StateTransitionTime = stateTransitionTime; + PreviousState = previousState; + PreviousStateTransitionTime = previousStateTransitionTime; + CommandLine = commandLine; + ContainerSettings = containerSettings; + ResourceFiles = resourceFiles; + OutputFiles = outputFiles; + EnvironmentSettings = environmentSettings; + AffinityInfo = affinityInfo; + Constraints = constraints; + RequiredSlots = requiredSlots; + UserIdentity = userIdentity; + ExecutionInfo = executionInfo; + NodeInfo = nodeInfo; + MultiInstanceSettings = multiInstanceSettings; + Stats = stats; + DependsOn = dependsOn; + ApplicationPackageReferences = applicationPackageReferences; + AuthenticationTokenSettings = authenticationTokenSettings; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// A string that uniquely identifies the Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. + public string Id { get; } + /// A display name for the Task. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. + public string DisplayName { get; } + /// The URL of the Task. + public string Url { get; } + /// The ETag of the Task. This is an opaque string. You can use it to detect whether the Task has changed between requests. In particular, you can pass the ETag when updating a Task to specify that your changes should take effect only if nobody else has modified the Task in the meantime. + public string ETag { get; } + /// The last modified time of the Task. + public DateTimeOffset? LastModified { get; } + /// The creation time of the Task. + public DateTimeOffset? CreationTime { get; } + /// How the Batch service should respond when the Task completes. + public ExitConditions ExitConditions { get; } + /// The current state of the Task. + public BatchTaskState? State { get; } + /// The time at which the Task entered its current state. + public DateTimeOffset? StateTransitionTime { get; } + /// The previous state of the Task. This property is not set if the Task is in its initial Active state. + public BatchTaskState? PreviousState { get; } + /// The time at which the Task entered its previous state. This property is not set if the Task is in its initial Active state. + public DateTimeOffset? PreviousStateTransitionTime { get; } + /// The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion.
If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + public string CommandLine { get; } + /// The settings for the container under which the Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. + public BatchTaskContainerSettings ContainerSettings { get; } + /// A list of files that the Batch service will download to the Compute Node before running the command line. For multi-instance Tasks, the resource files will only be downloaded to the Compute Node on which the primary Task is executed. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. + public IReadOnlyList ResourceFiles { get; } + /// A list of files that the Batch service will upload from the Compute Node after running the command line. For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed. + public IReadOnlyList OutputFiles { get; } + /// A list of environment variable settings for the Task. + public IReadOnlyList EnvironmentSettings { get; } + /// A locality hint that can be used by the Batch service to select a Compute Node on which to start the new Task. + public AffinityInfo AffinityInfo { get; } + /// The execution constraints that apply to this Task. + public BatchTaskConstraints Constraints { get; set; } + /// The number of scheduling slots that the Task requires to run. The default is 1. A Task can only be scheduled to run on a compute node if the node has enough free scheduling slots available. For multi-instance Tasks, this must be 1. + public int? RequiredSlots { get; } + /// The user identity under which the Task runs. If omitted, the Task runs as a non-administrative user unique to the Task. + public UserIdentity UserIdentity { get; } + /// Information about the execution of the Task. + public BatchTaskExecutionInfo ExecutionInfo { get; } + /// Information about the Compute Node on which the Task ran. + public BatchNodeInfo NodeInfo { get; } + /// An object that indicates that the Task is a multi-instance Task, and contains information about how to run the multi-instance Task. + public MultiInstanceSettings MultiInstanceSettings { get; } + /// Resource usage statistics for the Task. + public BatchTaskStatistics Stats { get; } + /// The Tasks that this Task depends on. 
This Task will not be scheduled until all Tasks that it depends on have completed successfully. If any of those Tasks fail and exhaust their retry counts, this Task will never be scheduled. + public BatchTaskDependencies DependsOn { get; } + /// A list of Packages that the Batch service will deploy to the Compute Node before running the command line. Application packages are downloaded and deployed to a shared directory, not the Task working directory. Therefore, if a referenced package is already on the Node, and is up to date, then it is not re-downloaded; the existing copy on the Compute Node is used. If a referenced Package cannot be installed, for example because the package has been deleted or because download failed, the Task fails. + public IReadOnlyList ApplicationPackageReferences { get; } + /// The settings for an authentication token that the Task can use to perform Batch service operations. If this property is set, the Batch service provides the Task with an authentication token which can be used to authenticate Batch service operations without requiring an Account access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out using the token depend on the settings. For example, a Task can request Job permissions in order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the Job. + public AuthenticationTokenSettings AuthenticationTokenSettings { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskAddCollectionResult.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskAddCollectionResult.Serialization.cs new file mode 100644 index 0000000000000..68facdb5d556f --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskAddCollectionResult.Serialization.cs @@ -0,0 +1,152 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchTaskAddCollectionResult : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskAddCollectionResult)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsCollectionDefined(Value)) + { + writer.WritePropertyName("value"u8); + writer.WriteStartArray(); + foreach (var item in Value) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchTaskAddCollectionResult IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskAddCollectionResult)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchTaskAddCollectionResult(document.RootElement, options); + } + + internal static BatchTaskAddCollectionResult DeserializeBatchTaskAddCollectionResult(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + IReadOnlyList value = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("value"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(BatchTaskAddResult.DeserializeBatchTaskAddResult(item, options)); + } + value = array; + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchTaskAddCollectionResult(value ?? new ChangeTrackingList(), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchTaskAddCollectionResult)} does not support writing '{options.Format}' format."); + } + } + + BatchTaskAddCollectionResult IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchTaskAddCollectionResult(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchTaskAddCollectionResult)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchTaskAddCollectionResult FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchTaskAddCollectionResult(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskAddCollectionResult.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskAddCollectionResult.cs new file mode 100644 index 0000000000000..94824c7958c06 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskAddCollectionResult.cs @@ -0,0 +1,66 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// The result of adding a collection of Tasks to a Job. + public partial class BatchTaskAddCollectionResult + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + internal BatchTaskAddCollectionResult() + { + Value = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// The results of the add Task collection operation. + /// Keeps track of any properties unknown to the library. + internal BatchTaskAddCollectionResult(IReadOnlyList value, IDictionary serializedAdditionalRawData) + { + Value = value; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The results of the add Task collection operation. + public IReadOnlyList Value { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskAddResult.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskAddResult.Serialization.cs new file mode 100644 index 0000000000000..b39d20c111bc2 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskAddResult.Serialization.cs @@ -0,0 +1,202 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
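The `BatchTaskAddCollectionResult` model above exposes one `BatchTaskAddResult` per submitted Task. As a rough sketch of how calling code might triage those per-task outcomes — the helper below is hypothetical (this diff does not show how the collection result is obtained from the client, and `BatchError.Code` is assumed to be the string error code):

```csharp
using System;
using System.Collections.Generic;
using Azure.Compute.Batch;

// Hypothetical helper: splits an add-collection result into retriable failures.
// Per the BatchTaskAddStatus docs, "servererror" results can be retried without
// modification, while "clienterror" results require fixing the request first.
static List<string> CollectRetriableTaskIds(BatchTaskAddCollectionResult result)
{
    var retriable = new List<string>();
    foreach (BatchTaskAddResult r in result.Value)
    {
        if (r.Status == BatchTaskAddStatus.ServerError)
        {
            retriable.Add(r.TaskId);
        }
        else if (r.Status == BatchTaskAddStatus.ClientError)
        {
            // Assumed property shape: BatchError.Code as the string error code.
            Console.WriteLine($"Task '{r.TaskId}' rejected: {r.Error?.Code}");
        }
    }
    return retriable;
}
```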
+ +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchTaskAddResult : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskAddResult)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("status"u8); + writer.WriteStringValue(Status.ToString()); + writer.WritePropertyName("taskId"u8); + writer.WriteStringValue(TaskId); + if (Optional.IsDefined(ETag)) + { + writer.WritePropertyName("eTag"u8); + writer.WriteStringValue(ETag); + } + if (Optional.IsDefined(LastModified)) + { + writer.WritePropertyName("lastModified"u8); + writer.WriteStringValue(LastModified.Value, "O"); + } + if (Optional.IsDefined(Location)) + { + writer.WritePropertyName("location"u8); + writer.WriteStringValue(Location); + } + if (Optional.IsDefined(Error)) + { + writer.WritePropertyName("error"u8); + writer.WriteObjectValue(Error, options); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchTaskAddResult IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskAddResult)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchTaskAddResult(document.RootElement, options); + } + + internal static BatchTaskAddResult DeserializeBatchTaskAddResult(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + BatchTaskAddStatus status = default; + string taskId = default; + string eTag = default; + DateTimeOffset? 
lastModified = default; + string location = default; + BatchError error = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("status"u8)) + { + status = new BatchTaskAddStatus(property.Value.GetString()); + continue; + } + if (property.NameEquals("taskId"u8)) + { + taskId = property.Value.GetString(); + continue; + } + if (property.NameEquals("eTag"u8)) + { + eTag = property.Value.GetString(); + continue; + } + if (property.NameEquals("lastModified"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + lastModified = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("location"u8)) + { + location = property.Value.GetString(); + continue; + } + if (property.NameEquals("error"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + error = BatchError.DeserializeBatchError(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchTaskAddResult( + status, + taskId, + eTag, + lastModified, + location, + error, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchTaskAddResult)} does not support writing '{options.Format}' format."); + } + } + + BatchTaskAddResult IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchTaskAddResult(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchTaskAddResult)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchTaskAddResult FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchTaskAddResult(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskAddResult.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskAddResult.cs new file mode 100644 index 0000000000000..2ac298b53fe29 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskAddResult.cs @@ -0,0 +1,97 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
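The `IJsonModel<T>`/`IPersistableModel<T>` plumbing just shown is what `System.ClientModel`'s `ModelReaderWriter` dispatches to. A minimal round-trip sketch; the sample payload, including the unrecognized `futureProp` field, is illustrative:

```csharp
using System;
using System.ClientModel.Primitives;
using Azure.Compute.Batch;

// "J" is the JSON format the generated reader/writer supports ("W", the wire
// format, resolves to "J" via GetFormatFromOptions).
var options = new ModelReaderWriterOptions("J");

BinaryData payload = BinaryData.FromString(
    "{\"status\":\"success\",\"taskId\":\"task-1\",\"futureProp\":42}");

// Dispatches to DeserializeBatchTaskAddResult; the unknown "futureProp" is
// captured in the additional-raw-data dictionary instead of being dropped.
BatchTaskAddResult model = ModelReaderWriter.Read<BatchTaskAddResult>(payload, options);

// Dispatches to the IJsonModel<T>.Write implementation above; the unknown
// property is written back out, so the "J" round trip is lossless.
BinaryData roundTripped = ModelReaderWriter.Write(model, options);
Console.WriteLine(roundTripped.ToString());
```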
+ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Result for a single Task added as part of an add Task collection operation. + public partial class BatchTaskAddResult + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use BinaryData.FromObjectAsJson<T>(T). + /// + /// + /// To assign an already formatted json string to this property use BinaryData.FromString(string). + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + private IDictionary<string, BinaryData> _serializedAdditionalRawData; + + /// Initializes a new instance of BatchTaskAddResult. + /// The status of the add Task request. + /// The ID of the Task for which this is the result. + /// taskId is null. + internal BatchTaskAddResult(BatchTaskAddStatus status, string taskId) + { + Argument.AssertNotNull(taskId, nameof(taskId)); + + Status = status; + TaskId = taskId; + } + + /// Initializes a new instance of BatchTaskAddResult. + /// The status of the add Task request. + /// The ID of the Task for which this is the result. + /// The ETag of the Task, if the Task was successfully added. You can use this to detect whether the Task has changed between requests. In particular, you can pass the ETag with an Update Task request to specify that your changes should take effect only if nobody else has modified the Job in the meantime. + /// The last modified time of the Task. + /// The URL of the Task, if the Task was successfully added. + /// The error encountered while attempting to add the Task. + /// Keeps track of any properties unknown to the library. + internal BatchTaskAddResult(BatchTaskAddStatus status, string taskId, string eTag, DateTimeOffset? lastModified, string location, BatchError error, IDictionary<string, BinaryData> serializedAdditionalRawData) + { + Status = status; + TaskId = taskId; + ETag = eTag; + LastModified = lastModified; + Location = location; + Error = error; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of BatchTaskAddResult for deserialization. + internal BatchTaskAddResult() + { + } + + /// The status of the add Task request. + public BatchTaskAddStatus Status { get; } + /// The ID of the Task for which this is the result. + public string TaskId { get; } + /// The ETag of the Task, if the Task was successfully added. You can use this to detect whether the Task has changed between requests. In particular, you can pass the ETag with an Update Task request to specify that your changes should take effect only if nobody else has modified the Job in the meantime. + public string ETag { get; } + /// The last modified time of the Task. + public DateTimeOffset? LastModified { get; } + /// The URL of the Task, if the Task was successfully added. + public string Location { get; } + /// The error encountered while attempting to add the Task.
+ public BatchError Error { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskAddStatus.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskAddStatus.cs new file mode 100644 index 0000000000000..9e3df2887ec1b --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskAddStatus.cs @@ -0,0 +1,54 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// BatchTaskAddStatus enums. + public readonly partial struct BatchTaskAddStatus : IEquatable<BatchTaskAddStatus> + { + private readonly string _value; + + /// Initializes a new instance of BatchTaskAddStatus. + /// value is null. + public BatchTaskAddStatus(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string SuccessValue = "success"; + private const string ClientErrorValue = "clienterror"; + private const string ServerErrorValue = "servererror"; + + /// The Task was added successfully. + public static BatchTaskAddStatus Success { get; } = new BatchTaskAddStatus(SuccessValue); + /// The Task failed to add due to a client error and should not be retried without modifying the request as appropriate. + public static BatchTaskAddStatus ClientError { get; } = new BatchTaskAddStatus(ClientErrorValue); + /// The Task failed to add due to a server error and can be retried without modification. + public static BatchTaskAddStatus ServerError { get; } = new BatchTaskAddStatus(ServerErrorValue); + /// Determines if two BatchTaskAddStatus values are the same. + public static bool operator ==(BatchTaskAddStatus left, BatchTaskAddStatus right) => left.Equals(right); + /// Determines if two BatchTaskAddStatus values are not the same. + public static bool operator !=(BatchTaskAddStatus left, BatchTaskAddStatus right) => !left.Equals(right); + /// Converts a string to a BatchTaskAddStatus. + public static implicit operator BatchTaskAddStatus(string value) => new BatchTaskAddStatus(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is BatchTaskAddStatus other && Equals(other); + /// + public bool Equals(BatchTaskAddStatus other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskConstraints.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskConstraints.Serialization.cs new file mode 100644 index 0000000000000..21a6503e72288 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskConstraints.Serialization.cs @@ -0,0 +1,172 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchTaskConstraints : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ?
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskConstraints)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(MaxWallClockTime)) + { + writer.WritePropertyName("maxWallClockTime"u8); + writer.WriteStringValue(MaxWallClockTime.Value, "P"); + } + if (Optional.IsDefined(RetentionTime)) + { + writer.WritePropertyName("retentionTime"u8); + writer.WriteStringValue(RetentionTime.Value, "P"); + } + if (Optional.IsDefined(MaxTaskRetryCount)) + { + writer.WritePropertyName("maxTaskRetryCount"u8); + writer.WriteNumberValue(MaxTaskRetryCount.Value); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchTaskConstraints IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskConstraints)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchTaskConstraints(document.RootElement, options); + } + + internal static BatchTaskConstraints DeserializeBatchTaskConstraints(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + TimeSpan? maxWallClockTime = default; + TimeSpan? retentionTime = default; + int? maxTaskRetryCount = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("maxWallClockTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + maxWallClockTime = property.Value.GetTimeSpan("P"); + continue; + } + if (property.NameEquals("retentionTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + retentionTime = property.Value.GetTimeSpan("P"); + continue; + } + if (property.NameEquals("maxTaskRetryCount"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + maxTaskRetryCount = property.Value.GetInt32(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchTaskConstraints(maxWallClockTime, retentionTime, maxTaskRetryCount, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchTaskConstraints)} does not support writing '{options.Format}' format."); + } + } + + BatchTaskConstraints IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchTaskConstraints(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchTaskConstraints)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchTaskConstraints FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchTaskConstraints(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskConstraints.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskConstraints.cs new file mode 100644 index 0000000000000..1b6a133c6d6a9 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskConstraints.cs @@ -0,0 +1,73 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Execution constraints to apply to a Task. + public partial class BatchTaskConstraints + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public BatchTaskConstraints() + { + } + + /// Initializes a new instance of . + /// The maximum elapsed time that the Task may run, measured from the time the Task starts. If the Task does not complete within the time limit, the Batch service terminates it. If this is not specified, there is no time limit on how long the Task may run. + /// The minimum time to retain the Task directory on the Compute Node where it ran, from the time it completes execution. After this time, the Batch service may delete the Task directory and all its contents. The default is 7 days, i.e. the Task directory will be retained for 7 days unless the Compute Node is removed or the Job is deleted. 
+ /// The maximum number of times the Task may be retried. The Batch service retries a Task if its exit code is nonzero. Note that this value specifically controls the number of retries for the Task executable due to a nonzero exit code. The Batch service will try the Task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries the Task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry the Task after the first attempt. If the maximum retry count is -1, the Batch service retries the Task without limit, however this is not recommended for a start task or any task. The default value is 0 (no retries). + /// Keeps track of any properties unknown to the library. + internal BatchTaskConstraints(TimeSpan? maxWallClockTime, TimeSpan? retentionTime, int? maxTaskRetryCount, IDictionary serializedAdditionalRawData) + { + MaxWallClockTime = maxWallClockTime; + RetentionTime = retentionTime; + MaxTaskRetryCount = maxTaskRetryCount; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The maximum elapsed time that the Task may run, measured from the time the Task starts. If the Task does not complete within the time limit, the Batch service terminates it. If this is not specified, there is no time limit on how long the Task may run. + public TimeSpan? MaxWallClockTime { get; set; } + /// The minimum time to retain the Task directory on the Compute Node where it ran, from the time it completes execution. After this time, the Batch service may delete the Task directory and all its contents. The default is 7 days, i.e. the Task directory will be retained for 7 days unless the Compute Node is removed or the Job is deleted. + public TimeSpan? RetentionTime { get; set; } + /// The maximum number of times the Task may be retried. The Batch service retries a Task if its exit code is nonzero. Note that this value specifically controls the number of retries for the Task executable due to a nonzero exit code. The Batch service will try the Task once, and may then retry up to this limit. For example, if the maximum retry count is 3, Batch tries the Task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry the Task after the first attempt. If the maximum retry count is -1, the Batch service retries the Task without limit, however this is not recommended for a start task or any task. The default value is 0 (no retries). + public int? MaxTaskRetryCount { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskContainerExecutionInfo.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskContainerExecutionInfo.Serialization.cs new file mode 100644 index 0000000000000..e3a1d872a7718 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskContainerExecutionInfo.Serialization.cs @@ -0,0 +1,160 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
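`BatchTaskConstraints` is fully settable, and its serializer above writes both `TimeSpan` properties as ISO 8601 durations (the "P" format). A small usage sketch; attaching the result to a task is left as a comment because the surrounding client call is not part of this diff:

```csharp
using System;
using Azure.Compute.Batch;

var constraints = new BatchTaskConstraints
{
    // Serialized as "PT2H" (ISO 8601 duration); no time limit if left unset.
    MaxWallClockTime = TimeSpan.FromHours(2),
    // Keep the Task directory for 1 day after completion (service default: 7 days).
    RetentionTime = TimeSpan.FromDays(1),
    // One initial attempt plus up to 3 retries = at most 4 executions.
    // 0 disables retries; -1 retries without limit (not recommended).
    MaxTaskRetryCount = 3,
};

// BatchTask.Constraints has a public setter, so the constraints can be applied
// to an existing task instance, e.g.:
// task.Constraints = constraints;
```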
+ +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchTaskContainerExecutionInfo : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskContainerExecutionInfo)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(ContainerId)) + { + writer.WritePropertyName("containerId"u8); + writer.WriteStringValue(ContainerId); + } + if (Optional.IsDefined(State)) + { + writer.WritePropertyName("state"u8); + writer.WriteStringValue(State); + } + if (Optional.IsDefined(Error)) + { + writer.WritePropertyName("error"u8); + writer.WriteStringValue(Error); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchTaskContainerExecutionInfo IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskContainerExecutionInfo)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchTaskContainerExecutionInfo(document.RootElement, options); + } + + internal static BatchTaskContainerExecutionInfo DeserializeBatchTaskContainerExecutionInfo(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string containerId = default; + string state = default; + string error = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("containerId"u8)) + { + containerId = property.Value.GetString(); + continue; + } + if (property.NameEquals("state"u8)) + { + state = property.Value.GetString(); + continue; + } + if (property.NameEquals("error"u8)) + { + error = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchTaskContainerExecutionInfo(containerId, state, error, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchTaskContainerExecutionInfo)} does not support writing '{options.Format}' format."); + } + } + + BatchTaskContainerExecutionInfo IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchTaskContainerExecutionInfo(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchTaskContainerExecutionInfo)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchTaskContainerExecutionInfo FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchTaskContainerExecutionInfo(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskContainerExecutionInfo.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskContainerExecutionInfo.cs new file mode 100644 index 0000000000000..966643cac5f60 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskContainerExecutionInfo.cs @@ -0,0 +1,73 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Contains information about the container which a Task is executing. + public partial class BatchTaskContainerExecutionInfo + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public BatchTaskContainerExecutionInfo() + { + } + + /// Initializes a new instance of . + /// The ID of the container. + /// The state of the container. This is the state of the container according to the Docker service. It is equivalent to the status field returned by "docker inspect". + /// Detailed error information about the container. This is the detailed error string from the Docker service, if available. It is equivalent to the error field returned by "docker inspect". + /// Keeps track of any properties unknown to the library. 
+ internal BatchTaskContainerExecutionInfo(string containerId, string state, string error, IDictionary serializedAdditionalRawData) + { + ContainerId = containerId; + State = state; + Error = error; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The ID of the container. + public string ContainerId { get; set; } + /// The state of the container. This is the state of the container according to the Docker service. It is equivalent to the status field returned by "docker inspect". + public string State { get; set; } + /// Detailed error information about the container. This is the detailed error string from the Docker service, if available. It is equivalent to the error field returned by "docker inspect". + public string Error { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskContainerSettings.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskContainerSettings.Serialization.cs new file mode 100644 index 0000000000000..df8670ff83d07 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskContainerSettings.Serialization.cs @@ -0,0 +1,176 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchTaskContainerSettings : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskContainerSettings)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(ContainerRunOptions)) + { + writer.WritePropertyName("containerRunOptions"u8); + writer.WriteStringValue(ContainerRunOptions); + } + writer.WritePropertyName("imageName"u8); + writer.WriteStringValue(ImageName); + if (Optional.IsDefined(Registry)) + { + writer.WritePropertyName("registry"u8); + writer.WriteObjectValue(Registry, options); + } + if (Optional.IsDefined(WorkingDirectory)) + { + writer.WritePropertyName("workingDirectory"u8); + writer.WriteStringValue(WorkingDirectory.Value.ToString()); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchTaskContainerSettings IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskContainerSettings)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchTaskContainerSettings(document.RootElement, options); + } + + internal static BatchTaskContainerSettings DeserializeBatchTaskContainerSettings(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string containerRunOptions = default; + string imageName = default; + ContainerRegistryReference registry = default; + ContainerWorkingDirectory? workingDirectory = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("containerRunOptions"u8)) + { + containerRunOptions = property.Value.GetString(); + continue; + } + if (property.NameEquals("imageName"u8)) + { + imageName = property.Value.GetString(); + continue; + } + if (property.NameEquals("registry"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + registry = ContainerRegistryReference.DeserializeContainerRegistryReference(property.Value, options); + continue; + } + if (property.NameEquals("workingDirectory"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + workingDirectory = new ContainerWorkingDirectory(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchTaskContainerSettings(containerRunOptions, imageName, registry, workingDirectory, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchTaskContainerSettings)} does not support writing '{options.Format}' format."); + } + } + + BatchTaskContainerSettings IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchTaskContainerSettings(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchTaskContainerSettings)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchTaskContainerSettings FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchTaskContainerSettings(document.RootElement); + } + + /// Convert into a . 
+ internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskContainerSettings.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskContainerSettings.cs new file mode 100644 index 0000000000000..9ecfdf09e22b8 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskContainerSettings.cs @@ -0,0 +1,87 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// The container settings for a Task. + public partial class BatchTaskContainerSettings + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The Image to use to create the container in which the Task will run. This is the full Image reference, as would be specified to "docker pull". If no tag is provided as part of the Image name, the tag ":latest" is used as a default. + /// is null. + public BatchTaskContainerSettings(string imageName) + { + Argument.AssertNotNull(imageName, nameof(imageName)); + + ImageName = imageName; + } + + /// Initializes a new instance of . + /// Additional options to the container create command. These additional options are supplied as arguments to the "docker create" command, in addition to those controlled by the Batch Service. + /// The Image to use to create the container in which the Task will run. This is the full Image reference, as would be specified to "docker pull". If no tag is provided as part of the Image name, the tag ":latest" is used as a default. + /// The private registry which contains the container Image. This setting can be omitted if was already provided at Pool creation. + /// The location of the container Task working directory. The default is 'taskWorkingDirectory'. + /// Keeps track of any properties unknown to the library. + internal BatchTaskContainerSettings(string containerRunOptions, string imageName, ContainerRegistryReference registry, ContainerWorkingDirectory? workingDirectory, IDictionary serializedAdditionalRawData) + { + ContainerRunOptions = containerRunOptions; + ImageName = imageName; + Registry = registry; + WorkingDirectory = workingDirectory; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchTaskContainerSettings() + { + } + + /// Additional options to the container create command. These additional options are supplied as arguments to the "docker create" command, in addition to those controlled by the Batch Service. 
+ public string ContainerRunOptions { get; set; } + /// The Image to use to create the container in which the Task will run. This is the full Image reference, as would be specified to "docker pull". If no tag is provided as part of the Image name, the tag ":latest" is used as a default. + public string ImageName { get; set; } + /// The private registry which contains the container Image. This setting can be omitted if was already provided at Pool creation. + public ContainerRegistryReference Registry { get; set; } + /// The location of the container Task working directory. The default is 'taskWorkingDirectory'. + public ContainerWorkingDirectory? WorkingDirectory { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCounts.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCounts.Serialization.cs new file mode 100644 index 0000000000000..ac91cee387b9b --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCounts.Serialization.cs @@ -0,0 +1,173 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchTaskCounts : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskCounts)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("active"u8); + writer.WriteNumberValue(Active); + writer.WritePropertyName("running"u8); + writer.WriteNumberValue(Running); + writer.WritePropertyName("completed"u8); + writer.WriteNumberValue(Completed); + writer.WritePropertyName("succeeded"u8); + writer.WriteNumberValue(Succeeded); + writer.WritePropertyName("failed"u8); + writer.WriteNumberValue(Failed); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchTaskCounts IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskCounts)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchTaskCounts(document.RootElement, options); + } + + internal static BatchTaskCounts DeserializeBatchTaskCounts(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + int active = default; + int running = default; + int completed = default; + int succeeded = default; + int failed = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("active"u8)) + { + active = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("running"u8)) + { + running = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("completed"u8)) + { + completed = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("succeeded"u8)) + { + succeeded = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("failed"u8)) + { + failed = property.Value.GetInt32(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchTaskCounts( + active, + running, + completed, + succeeded, + failed, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchTaskCounts)} does not support writing '{options.Format}' format."); + } + } + + BatchTaskCounts IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchTaskCounts(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchTaskCounts)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchTaskCounts FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchTaskCounts(document.RootElement); + } + + /// Convert into a . 
+ internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCounts.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCounts.cs new file mode 100644 index 0000000000000..c5f9f3aa50999 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCounts.cs @@ -0,0 +1,96 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// The Task counts for a Job. + public partial class BatchTaskCounts + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The number of Tasks in the active state. + /// The number of Tasks in the running or preparing state. + /// The number of Tasks in the completed state. + /// The number of Tasks which succeeded. A Task succeeds if its result (found in the executionInfo property) is 'success'. + /// The number of Tasks which failed. A Task fails if its result (found in the executionInfo property) is 'failure'. + internal BatchTaskCounts(int active, int running, int completed, int succeeded, int failed) + { + Active = active; + Running = running; + Completed = completed; + Succeeded = succeeded; + Failed = failed; + } + + /// Initializes a new instance of . + /// The number of Tasks in the active state. + /// The number of Tasks in the running or preparing state. + /// The number of Tasks in the completed state. + /// The number of Tasks which succeeded. A Task succeeds if its result (found in the executionInfo property) is 'success'. + /// The number of Tasks which failed. A Task fails if its result (found in the executionInfo property) is 'failure'. + /// Keeps track of any properties unknown to the library. + internal BatchTaskCounts(int active, int running, int completed, int succeeded, int failed, IDictionary serializedAdditionalRawData) + { + Active = active; + Running = running; + Completed = completed; + Succeeded = succeeded; + Failed = failed; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchTaskCounts() + { + } + + /// The number of Tasks in the active state. + public int Active { get; } + /// The number of Tasks in the running or preparing state. + public int Running { get; } + /// The number of Tasks in the completed state. + public int Completed { get; } + /// The number of Tasks which succeeded. A Task succeeds if its result (found in the executionInfo property) is 'success'. + public int Succeeded { get; } + /// The number of Tasks which failed. 
A Task fails if its result (found in the executionInfo property) is 'failure'. + public int Failed { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCountsResult.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCountsResult.Serialization.cs new file mode 100644 index 0000000000000..391729746f972 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCountsResult.Serialization.cs @@ -0,0 +1,143 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchTaskCountsResult : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskCountsResult)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("taskCounts"u8); + writer.WriteObjectValue(TaskCounts, options); + writer.WritePropertyName("taskSlotCounts"u8); + writer.WriteObjectValue(TaskSlotCounts, options); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchTaskCountsResult IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskCountsResult)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchTaskCountsResult(document.RootElement, options); + } + + internal static BatchTaskCountsResult DeserializeBatchTaskCountsResult(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + BatchTaskCounts taskCounts = default; + BatchTaskSlotCounts taskSlotCounts = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("taskCounts"u8)) + { + taskCounts = BatchTaskCounts.DeserializeBatchTaskCounts(property.Value, options); + continue; + } + if (property.NameEquals("taskSlotCounts"u8)) + { + taskSlotCounts = BatchTaskSlotCounts.DeserializeBatchTaskSlotCounts(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchTaskCountsResult(taskCounts, taskSlotCounts, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchTaskCountsResult)} does not support writing '{options.Format}' format."); + } + } + + BatchTaskCountsResult IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchTaskCountsResult(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchTaskCountsResult)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchTaskCountsResult FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchTaskCountsResult(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCountsResult.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCountsResult.cs new file mode 100644 index 0000000000000..e65c44f712bc4 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCountsResult.cs @@ -0,0 +1,82 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
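
Before the BatchTaskCountsResult model body, a short sketch of how these counts are typically read. The BatchClient constructor shape and the GetJobTaskCountsAsync operation name are assumptions about the generated client surface rather than part of this diff; the account endpoint and job ID are placeholders:

using System;
using Azure.Compute.Batch;
using Azure.Identity;

// Assumed client surface: a BatchClient taking the account endpoint and a
// TokenCredential, exposing GetJobTaskCountsAsync for a job's task counts.
var client = new BatchClient(
    new Uri("https://<account>.<region>.batch.azure.com"),
    new DefaultAzureCredential());

BatchTaskCountsResult result = await client.GetJobTaskCountsAsync("job-1");
BatchTaskCounts counts = result.TaskCounts;

// Active/Running/Completed/Succeeded/Failed are the read-only counters
// declared on BatchTaskCounts earlier in this diff.
Console.WriteLine($"active={counts.Active}, running={counts.Running}, " +
    $"succeeded={counts.Succeeded}, failed={counts.Failed}");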
+ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// The Task and TaskSlot counts for a Job. + public partial class BatchTaskCountsResult + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The number of Tasks per state. + /// The number of TaskSlots required by Tasks per state. + /// or is null. + internal BatchTaskCountsResult(BatchTaskCounts taskCounts, BatchTaskSlotCounts taskSlotCounts) + { + Argument.AssertNotNull(taskCounts, nameof(taskCounts)); + Argument.AssertNotNull(taskSlotCounts, nameof(taskSlotCounts)); + + TaskCounts = taskCounts; + TaskSlotCounts = taskSlotCounts; + } + + /// Initializes a new instance of . + /// The number of Tasks per state. + /// The number of TaskSlots required by Tasks per state. + /// Keeps track of any properties unknown to the library. + internal BatchTaskCountsResult(BatchTaskCounts taskCounts, BatchTaskSlotCounts taskSlotCounts, IDictionary serializedAdditionalRawData) + { + TaskCounts = taskCounts; + TaskSlotCounts = taskSlotCounts; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchTaskCountsResult() + { + } + + /// The number of Tasks per state. + public BatchTaskCounts TaskCounts { get; } + /// The number of TaskSlots required by Tasks per state. + public BatchTaskSlotCounts TaskSlotCounts { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCreateContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCreateContent.Serialization.cs new file mode 100644 index 0000000000000..6481923dc5851 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCreateContent.Serialization.cs @@ -0,0 +1,406 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchTaskCreateContent : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskCreateContent)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + if (Optional.IsDefined(DisplayName)) + { + writer.WritePropertyName("displayName"u8); + writer.WriteStringValue(DisplayName); + } + if (Optional.IsDefined(ExitConditions)) + { + writer.WritePropertyName("exitConditions"u8); + writer.WriteObjectValue(ExitConditions, options); + } + writer.WritePropertyName("commandLine"u8); + writer.WriteStringValue(CommandLine); + if (Optional.IsDefined(ContainerSettings)) + { + writer.WritePropertyName("containerSettings"u8); + writer.WriteObjectValue(ContainerSettings, options); + } + if (Optional.IsCollectionDefined(ResourceFiles)) + { + writer.WritePropertyName("resourceFiles"u8); + writer.WriteStartArray(); + foreach (var item in ResourceFiles) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (Optional.IsCollectionDefined(OutputFiles)) + { + writer.WritePropertyName("outputFiles"u8); + writer.WriteStartArray(); + foreach (var item in OutputFiles) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (Optional.IsCollectionDefined(EnvironmentSettings)) + { + writer.WritePropertyName("environmentSettings"u8); + writer.WriteStartArray(); + foreach (var item in EnvironmentSettings) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (Optional.IsDefined(AffinityInfo)) + { + writer.WritePropertyName("affinityInfo"u8); + writer.WriteObjectValue(AffinityInfo, options); + } + if (Optional.IsDefined(Constraints)) + { + writer.WritePropertyName("constraints"u8); + writer.WriteObjectValue(Constraints, options); + } + if (Optional.IsDefined(RequiredSlots)) + { + writer.WritePropertyName("requiredSlots"u8); + writer.WriteNumberValue(RequiredSlots.Value); + } + if (Optional.IsDefined(UserIdentity)) + { + writer.WritePropertyName("userIdentity"u8); + writer.WriteObjectValue(UserIdentity, options); + } + if (Optional.IsDefined(MultiInstanceSettings)) + { + writer.WritePropertyName("multiInstanceSettings"u8); + writer.WriteObjectValue(MultiInstanceSettings, options); + } + if (Optional.IsDefined(DependsOn)) + { + writer.WritePropertyName("dependsOn"u8); + writer.WriteObjectValue(DependsOn, options); + } + if (Optional.IsCollectionDefined(ApplicationPackageReferences)) + { + writer.WritePropertyName("applicationPackageReferences"u8); + writer.WriteStartArray(); + foreach (var item in ApplicationPackageReferences) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (Optional.IsDefined(AuthenticationTokenSettings)) + { + writer.WritePropertyName("authenticationTokenSettings"u8); + writer.WriteObjectValue(AuthenticationTokenSettings, options); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchTaskCreateContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskCreateContent)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchTaskCreateContent(document.RootElement, options); + } + + internal static BatchTaskCreateContent DeserializeBatchTaskCreateContent(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + string displayName = default; + ExitConditions exitConditions = default; + string commandLine = default; + BatchTaskContainerSettings containerSettings = default; + IList resourceFiles = default; + IList outputFiles = default; + IList environmentSettings = default; + AffinityInfo affinityInfo = default; + BatchTaskConstraints constraints = default; + int? requiredSlots = default; + UserIdentity userIdentity = default; + MultiInstanceSettings multiInstanceSettings = default; + BatchTaskDependencies dependsOn = default; + IList applicationPackageReferences = default; + AuthenticationTokenSettings authenticationTokenSettings = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("displayName"u8)) + { + displayName = property.Value.GetString(); + continue; + } + if (property.NameEquals("exitConditions"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + exitConditions = ExitConditions.DeserializeExitConditions(property.Value, options); + continue; + } + if (property.NameEquals("commandLine"u8)) + { + commandLine = property.Value.GetString(); + continue; + } + if (property.NameEquals("containerSettings"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + containerSettings = BatchTaskContainerSettings.DeserializeBatchTaskContainerSettings(property.Value, options); + continue; + } + if (property.NameEquals("resourceFiles"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(ResourceFile.DeserializeResourceFile(item, options)); + } + resourceFiles = array; + continue; + } + if (property.NameEquals("outputFiles"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(OutputFile.DeserializeOutputFile(item, options)); + } + outputFiles = array; + continue; + } + if (property.NameEquals("environmentSettings"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(EnvironmentSetting.DeserializeEnvironmentSetting(item, options)); + } + environmentSettings = array; + continue; + } + if (property.NameEquals("affinityInfo"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + affinityInfo = AffinityInfo.DeserializeAffinityInfo(property.Value, options); + continue; + } + if (property.NameEquals("constraints"u8)) + { + if (property.Value.ValueKind == 
JsonValueKind.Null) + { + continue; + } + constraints = BatchTaskConstraints.DeserializeBatchTaskConstraints(property.Value, options); + continue; + } + if (property.NameEquals("requiredSlots"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + requiredSlots = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("userIdentity"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + userIdentity = UserIdentity.DeserializeUserIdentity(property.Value, options); + continue; + } + if (property.NameEquals("multiInstanceSettings"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + multiInstanceSettings = MultiInstanceSettings.DeserializeMultiInstanceSettings(property.Value, options); + continue; + } + if (property.NameEquals("dependsOn"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + dependsOn = BatchTaskDependencies.DeserializeBatchTaskDependencies(property.Value, options); + continue; + } + if (property.NameEquals("applicationPackageReferences"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(BatchApplicationPackageReference.DeserializeBatchApplicationPackageReference(item, options)); + } + applicationPackageReferences = array; + continue; + } + if (property.NameEquals("authenticationTokenSettings"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + authenticationTokenSettings = AuthenticationTokenSettings.DeserializeAuthenticationTokenSettings(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchTaskCreateContent( + id, + displayName, + exitConditions, + commandLine, + containerSettings, + resourceFiles ?? new ChangeTrackingList(), + outputFiles ?? new ChangeTrackingList(), + environmentSettings ?? new ChangeTrackingList(), + affinityInfo, + constraints, + requiredSlots, + userIdentity, + multiInstanceSettings, + dependsOn, + applicationPackageReferences ?? new ChangeTrackingList(), + authenticationTokenSettings, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchTaskCreateContent)} does not support writing '{options.Format}' format."); + } + } + + BatchTaskCreateContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchTaskCreateContent(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchTaskCreateContent)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. 
+ internal static BatchTaskCreateContent FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchTaskCreateContent(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCreateContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCreateContent.cs new file mode 100644 index 0000000000000..88818560e6b85 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCreateContent.cs @@ -0,0 +1,142 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Parameters for creating an Azure Batch Task. + public partial class BatchTaskCreateContent + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// A string that uniquely identifies the Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within a Job that differ only by case). + /// The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + /// or is null. + public BatchTaskCreateContent(string id, string commandLine) + { + Argument.AssertNotNull(id, nameof(id)); + Argument.AssertNotNull(commandLine, nameof(commandLine)); + + Id = id; + CommandLine = commandLine; + ResourceFiles = new ChangeTrackingList(); + OutputFiles = new ChangeTrackingList(); + EnvironmentSettings = new ChangeTrackingList(); + ApplicationPackageReferences = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// A string that uniquely identifies the Task within the Job. 
The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within a Job that differ only by case). + /// A display name for the Task. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. + /// How the Batch service should respond when the Task completes. + /// The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + /// The settings for the container under which the Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. + /// A list of files that the Batch service will download to the Compute Node before running the command line. For multi-instance Tasks, the resource files will only be downloaded to the Compute Node on which the primary Task is executed. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. + /// A list of files that the Batch service will upload from the Compute Node after running the command line. For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed. + /// A list of environment variable settings for the Task. + /// A locality hint that can be used by the Batch service to select a Compute Node on which to start the new Task. + /// The execution constraints that apply to this Task. If you do not specify constraints, the maxTaskRetryCount is the maxTaskRetryCount specified for the Job, the maxWallClockTime is infinite, and the retentionTime is 7 days. + /// The number of scheduling slots that the Task required to run. The default is 1. A Task can only be scheduled to run on a compute node if the node has enough free scheduling slots available. For multi-instance Tasks, this must be 1. + /// The user identity under which the Task runs. If omitted, the Task runs as a non-administrative user unique to the Task. 
+ /// An object that indicates that the Task is a multi-instance Task, and contains information about how to run the multi-instance Task. + /// The Tasks that this Task depends on. This Task will not be scheduled until all Tasks that it depends on have completed successfully. If any of those Tasks fail and exhaust their retry counts, this Task will never be scheduled. If the Job does not have usesTaskDependencies set to true, and this element is present, the request fails with error code TaskDependenciesNotSpecifiedOnJob. + /// A list of Packages that the Batch service will deploy to the Compute Node before running the command line. Application packages are downloaded and deployed to a shared directory, not the Task working directory. Therefore, if a referenced package is already on the Node, and is up to date, then it is not re-downloaded; the existing copy on the Compute Node is used. If a referenced Package cannot be installed, for example because the package has been deleted or because download failed, the Task fails. + /// The settings for an authentication token that the Task can use to perform Batch service operations. If this property is set, the Batch service provides the Task with an authentication token which can be used to authenticate Batch service operations without requiring an Account access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out using the token depend on the settings. For example, a Task can request Job permissions in order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the Job. + /// Keeps track of any properties unknown to the library. + internal BatchTaskCreateContent(string id, string displayName, ExitConditions exitConditions, string commandLine, BatchTaskContainerSettings containerSettings, IList resourceFiles, IList outputFiles, IList environmentSettings, AffinityInfo affinityInfo, BatchTaskConstraints constraints, int? requiredSlots, UserIdentity userIdentity, MultiInstanceSettings multiInstanceSettings, BatchTaskDependencies dependsOn, IList applicationPackageReferences, AuthenticationTokenSettings authenticationTokenSettings, IDictionary serializedAdditionalRawData) + { + Id = id; + DisplayName = displayName; + ExitConditions = exitConditions; + CommandLine = commandLine; + ContainerSettings = containerSettings; + ResourceFiles = resourceFiles; + OutputFiles = outputFiles; + EnvironmentSettings = environmentSettings; + AffinityInfo = affinityInfo; + Constraints = constraints; + RequiredSlots = requiredSlots; + UserIdentity = userIdentity; + MultiInstanceSettings = multiInstanceSettings; + DependsOn = dependsOn; + ApplicationPackageReferences = applicationPackageReferences; + AuthenticationTokenSettings = authenticationTokenSettings; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchTaskCreateContent() + { + } + + /// A string that uniquely identifies the Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within a Job that differ only by case). + public string Id { get; } + /// A display name for the Task. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. 
+ public string DisplayName { get; set; } + /// How the Batch service should respond when the Task completes. + public ExitConditions ExitConditions { get; set; } + /// The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + public string CommandLine { get; } + /// The settings for the container under which the Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. + public BatchTaskContainerSettings ContainerSettings { get; set; } + /// A list of files that the Batch service will download to the Compute Node before running the command line. For multi-instance Tasks, the resource files will only be downloaded to the Compute Node on which the primary Task is executed. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. + public IList ResourceFiles { get; } + /// A list of files that the Batch service will upload from the Compute Node after running the command line. For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed. + public IList OutputFiles { get; } + /// A list of environment variable settings for the Task. + public IList EnvironmentSettings { get; } + /// A locality hint that can be used by the Batch service to select a Compute Node on which to start the new Task. + public AffinityInfo AffinityInfo { get; set; } + /// The execution constraints that apply to this Task. If you do not specify constraints, the maxTaskRetryCount is the maxTaskRetryCount specified for the Job, the maxWallClockTime is infinite, and the retentionTime is 7 days. + public BatchTaskConstraints Constraints { get; set; } + /// The number of scheduling slots that the Task required to run. The default is 1. A Task can only be scheduled to run on a compute node if the node has enough free scheduling slots available. For multi-instance Tasks, this must be 1. + public int? RequiredSlots { get; set; } + /// The user identity under which the Task runs. 
If omitted, the Task runs as a non-administrative user unique to the Task. + public UserIdentity UserIdentity { get; set; } + /// An object that indicates that the Task is a multi-instance Task, and contains information about how to run the multi-instance Task. + public MultiInstanceSettings MultiInstanceSettings { get; set; } + /// The Tasks that this Task depends on. This Task will not be scheduled until all Tasks that it depends on have completed successfully. If any of those Tasks fail and exhaust their retry counts, this Task will never be scheduled. If the Job does not have usesTaskDependencies set to true, and this element is present, the request fails with error code TaskDependenciesNotSpecifiedOnJob. + public BatchTaskDependencies DependsOn { get; set; } + /// A list of Packages that the Batch service will deploy to the Compute Node before running the command line. Application packages are downloaded and deployed to a shared directory, not the Task working directory. Therefore, if a referenced package is already on the Node, and is up to date, then it is not re-downloaded; the existing copy on the Compute Node is used. If a referenced Package cannot be installed, for example because the package has been deleted or because download failed, the Task fails. + public IList ApplicationPackageReferences { get; } + /// The settings for an authentication token that the Task can use to perform Batch service operations. If this property is set, the Batch service provides the Task with an authentication token which can be used to authenticate Batch service operations without requiring an Account access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out using the token depend on the settings. For example, a Task can request Job permissions in order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the Job. + public AuthenticationTokenSettings AuthenticationTokenSettings { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskDependencies.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskDependencies.Serialization.cs new file mode 100644 index 0000000000000..f60524791f391 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskDependencies.Serialization.cs @@ -0,0 +1,177 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchTaskDependencies : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskDependencies)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsCollectionDefined(TaskIds)) + { + writer.WritePropertyName("taskIds"u8); + writer.WriteStartArray(); + foreach (var item in TaskIds) + { + writer.WriteStringValue(item); + } + writer.WriteEndArray(); + } + if (Optional.IsCollectionDefined(TaskIdRanges)) + { + writer.WritePropertyName("taskIdRanges"u8); + writer.WriteStartArray(); + foreach (var item in TaskIdRanges) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchTaskDependencies IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskDependencies)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchTaskDependencies(document.RootElement, options); + } + + internal static BatchTaskDependencies DeserializeBatchTaskDependencies(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + IList taskIds = default; + IList taskIdRanges = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("taskIds"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(item.GetString()); + } + taskIds = array; + continue; + } + if (property.NameEquals("taskIdRanges"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(BatchTaskIdRange.DeserializeBatchTaskIdRange(item, options)); + } + taskIdRanges = array; + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchTaskDependencies(taskIds ?? new ChangeTrackingList(), taskIdRanges ?? new ChangeTrackingList(), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel<BatchTaskDependencies>)this).GetFormatFromOptions(options) : options.Format;
+
+            switch (format)
+            {
+                case "J":
+                    return ModelReaderWriter.Write(this, options);
+                default:
+                    throw new FormatException($"The model {nameof(BatchTaskDependencies)} does not support writing '{options.Format}' format.");
+            }
+        }
+
+        BatchTaskDependencies IPersistableModel<BatchTaskDependencies>.Create(BinaryData data, ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ? ((IPersistableModel<BatchTaskDependencies>)this).GetFormatFromOptions(options) : options.Format;
+
+            switch (format)
+            {
+                case "J":
+                    {
+                        using JsonDocument document = JsonDocument.Parse(data);
+                        return DeserializeBatchTaskDependencies(document.RootElement, options);
+                    }
+                default:
+                    throw new FormatException($"The model {nameof(BatchTaskDependencies)} does not support reading '{options.Format}' format.");
+            }
+        }
+
+        string IPersistableModel<BatchTaskDependencies>.GetFormatFromOptions(ModelReaderWriterOptions options) => "J";
+
+        /// Deserializes the model from a raw response.
+        /// The response to deserialize the model from.
+        internal static BatchTaskDependencies FromResponse(Response response)
+        {
+            using var document = JsonDocument.Parse(response.Content);
+            return DeserializeBatchTaskDependencies(document.RootElement);
+        }
+
+        /// Convert into a RequestContent.
+        internal virtual RequestContent ToRequestContent()
+        {
+            var content = new Utf8JsonRequestContent();
+            content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions);
+            return content;
+        }
+    }
+}
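Because BatchTaskDependencies implements IJsonModel<T> and IPersistableModel<T>, it can be round-tripped through System.ClientModel's ModelReaderWriter. A minimal sketch (not part of the change):

    using System.ClientModel.Primitives;

    var deps = new BatchTaskDependencies { TaskIds = { "task-1" } };
    BinaryData json = ModelReaderWriter.Write(deps);  // serializes via the "J" (JSON) format above
    BatchTaskDependencies roundTrip = ModelReaderWriter.Read<BatchTaskDependencies>(json);

diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskDependencies.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskDependencies.cs
new file mode 100644
index 0000000000000..a0038ba94fe9f
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskDependencies.cs
@@ -0,0 +1,75 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.Collections.Generic;
+
+namespace Azure.Compute.Batch
+{
+    ///
+    /// Specifies any dependencies of a Task. Any Task that is explicitly specified or
+    /// within a dependency range must complete before the dependent Task will be
+    /// scheduled.
+    ///
+    public partial class BatchTaskDependencies
+    {
+        ///
+        /// Keeps track of any properties unknown to the library.
+        ///
+        /// To assign an object to the value of this property use .
+        ///
+        ///
+        /// To assign an already formatted json string to this property use .
+        ///
+        ///
+        /// Examples:
+        ///
+        ///
+        /// BinaryData.FromObjectAsJson("foo")
+        /// Creates a payload of "foo".
+        ///
+        ///
+        /// BinaryData.FromString("\"foo\"")
+        /// Creates a payload of "foo".
+        ///
+        ///
+        /// BinaryData.FromObjectAsJson(new { key = "value" })
+        /// Creates a payload of { "key": "value" }.
+        ///
+        ///
+        /// BinaryData.FromString("{\"key\": \"value\"}")
+        /// Creates a payload of { "key": "value" }.
+        ///
+        ///
+        ///
+        private IDictionary<string, BinaryData> _serializedAdditionalRawData;
+
+        /// Initializes a new instance of .
+        public BatchTaskDependencies()
+        {
+            TaskIds = new ChangeTrackingList<string>();
+            TaskIdRanges = new ChangeTrackingList<BatchTaskIdRange>();
+        }
+
+        /// Initializes a new instance of .
+        /// The list of Task IDs that this Task depends on. All Tasks in this list must complete successfully before the dependent Task can be scheduled. The taskIds collection is limited to 64000 characters total (i.e. the combined length of all Task IDs).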
+        /// If the taskIds collection exceeds the maximum length, the Add Task request fails with error code TaskDependencyListTooLong. In this case consider using Task ID ranges instead.
+        /// The list of Task ID ranges that this Task depends on. All Tasks in all ranges must complete successfully before the dependent Task can be scheduled.
+        /// Keeps track of any properties unknown to the library.
+        internal BatchTaskDependencies(IList<string> taskIds, IList<BatchTaskIdRange> taskIdRanges, IDictionary<string, BinaryData> serializedAdditionalRawData)
+        {
+            TaskIds = taskIds;
+            TaskIdRanges = taskIdRanges;
+            _serializedAdditionalRawData = serializedAdditionalRawData;
+        }
+
+        /// The list of Task IDs that this Task depends on. All Tasks in this list must complete successfully before the dependent Task can be scheduled. The taskIds collection is limited to 64000 characters total (i.e. the combined length of all Task IDs). If the taskIds collection exceeds the maximum length, the Add Task request fails with error code TaskDependencyListTooLong. In this case consider using Task ID ranges instead.
+        public IList<string> TaskIds { get; }
+        /// The list of Task ID ranges that this Task depends on. All Tasks in all ranges must complete successfully before the dependent Task can be scheduled.
+        public IList<BatchTaskIdRange> TaskIdRanges { get; }
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskExecutionInfo.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskExecutionInfo.Serialization.cs
new file mode 100644
index 0000000000000..37d904a938704
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskExecutionInfo.Serialization.cs
@@ -0,0 +1,274 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.ClientModel.Primitives;
+using System.Collections.Generic;
+using System.Text.Json;
+using Azure.Core;
+
+namespace Azure.Compute.Batch
+{
+    public partial class BatchTaskExecutionInfo : IUtf8JsonSerializable, IJsonModel<BatchTaskExecutionInfo>
+    {
+        void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel<BatchTaskExecutionInfo>)this).Write(writer, ModelSerializationExtensions.WireOptions);
+
+        void IJsonModel<BatchTaskExecutionInfo>.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ?
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskExecutionInfo)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(StartTime)) + { + writer.WritePropertyName("startTime"u8); + writer.WriteStringValue(StartTime.Value, "O"); + } + if (Optional.IsDefined(EndTime)) + { + writer.WritePropertyName("endTime"u8); + writer.WriteStringValue(EndTime.Value, "O"); + } + if (Optional.IsDefined(ExitCode)) + { + writer.WritePropertyName("exitCode"u8); + writer.WriteNumberValue(ExitCode.Value); + } + if (Optional.IsDefined(ContainerInfo)) + { + writer.WritePropertyName("containerInfo"u8); + writer.WriteObjectValue(ContainerInfo, options); + } + if (Optional.IsDefined(FailureInfo)) + { + writer.WritePropertyName("failureInfo"u8); + writer.WriteObjectValue(FailureInfo, options); + } + writer.WritePropertyName("retryCount"u8); + writer.WriteNumberValue(RetryCount); + if (Optional.IsDefined(LastRetryTime)) + { + writer.WritePropertyName("lastRetryTime"u8); + writer.WriteStringValue(LastRetryTime.Value, "O"); + } + writer.WritePropertyName("requeueCount"u8); + writer.WriteNumberValue(RequeueCount); + if (Optional.IsDefined(LastRequeueTime)) + { + writer.WritePropertyName("lastRequeueTime"u8); + writer.WriteStringValue(LastRequeueTime.Value, "O"); + } + if (Optional.IsDefined(Result)) + { + writer.WritePropertyName("result"u8); + writer.WriteStringValue(Result.Value.ToString()); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchTaskExecutionInfo IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskExecutionInfo)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchTaskExecutionInfo(document.RootElement, options); + } + + internal static BatchTaskExecutionInfo DeserializeBatchTaskExecutionInfo(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + DateTimeOffset? startTime = default; + DateTimeOffset? endTime = default; + int? exitCode = default; + BatchTaskContainerExecutionInfo containerInfo = default; + BatchTaskFailureInfo failureInfo = default; + int retryCount = default; + DateTimeOffset? lastRetryTime = default; + int requeueCount = default; + DateTimeOffset? lastRequeueTime = default; + BatchTaskExecutionResult? 
result = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("startTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + startTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("endTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + endTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("exitCode"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + exitCode = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("containerInfo"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + containerInfo = BatchTaskContainerExecutionInfo.DeserializeBatchTaskContainerExecutionInfo(property.Value, options); + continue; + } + if (property.NameEquals("failureInfo"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + failureInfo = BatchTaskFailureInfo.DeserializeBatchTaskFailureInfo(property.Value, options); + continue; + } + if (property.NameEquals("retryCount"u8)) + { + retryCount = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("lastRetryTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + lastRetryTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("requeueCount"u8)) + { + requeueCount = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("lastRequeueTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + lastRequeueTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("result"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + result = new BatchTaskExecutionResult(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchTaskExecutionInfo( + startTime, + endTime, + exitCode, + containerInfo, + failureInfo, + retryCount, + lastRetryTime, + requeueCount, + lastRequeueTime, + result, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchTaskExecutionInfo)} does not support writing '{options.Format}' format."); + } + } + + BatchTaskExecutionInfo IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchTaskExecutionInfo(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchTaskExecutionInfo)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. 
+ /// The response to deserialize the model from. + internal static BatchTaskExecutionInfo FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchTaskExecutionInfo(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskExecutionInfo.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskExecutionInfo.cs new file mode 100644 index 0000000000000..48d9c67b08ee5 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskExecutionInfo.cs @@ -0,0 +1,110 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Information about the execution of a Task. + public partial class BatchTaskExecutionInfo + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The number of times the Task has been retried by the Batch service. Task application failures (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file upload errors are not retried. The Batch service will retry the Task up to the limit specified by the constraints. + /// The number of times the Task has been requeued by the Batch service as the result of a user request. When the user removes Compute Nodes from a Pool (by resizing/shrinking the pool) or when the Job is being disabled, the user can specify that running Tasks on the Compute Nodes be requeued for execution. This count tracks how many times the Task has been requeued for these reasons. + public BatchTaskExecutionInfo(int retryCount, int requeueCount) + { + RetryCount = retryCount; + RequeueCount = requeueCount; + } + + /// Initializes a new instance of . + /// The time at which the Task started running. 'Running' corresponds to the running state, so if the Task specifies resource files or Packages, then the start time reflects the time at which the Task started downloading or deploying these. If the Task has been restarted or retried, this is the most recent time at which the Task started running. This property is present only for Tasks that are in the running or completed state. + /// The time at which the Task completed. This property is set only if the Task is in the Completed state. + /// The exit code of the program specified on the Task command line. This property is set only if the Task is in the completed state. 
In general, the exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. However, if the Batch service terminates the Task (due to timeout, or user termination via the API) you may see an operating system-defined exit code. + /// Information about the container under which the Task is executing. This property is set only if the Task runs in a container context. + /// Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. + /// The number of times the Task has been retried by the Batch service. Task application failures (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file upload errors are not retried. The Batch service will retry the Task up to the limit specified by the constraints. + /// The most recent time at which a retry of the Task started running. This element is present only if the Task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the Task has been restarted for reasons other than retry; for example, if the Compute Node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not. + /// The number of times the Task has been requeued by the Batch service as the result of a user request. When the user removes Compute Nodes from a Pool (by resizing/shrinking the pool) or when the Job is being disabled, the user can specify that running Tasks on the Compute Nodes be requeued for execution. This count tracks how many times the Task has been requeued for these reasons. + /// The most recent time at which the Task has been requeued by the Batch service as the result of a user request. This property is set only if the requeueCount is nonzero. + /// The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. + /// Keeps track of any properties unknown to the library. + internal BatchTaskExecutionInfo(DateTimeOffset? startTime, DateTimeOffset? endTime, int? exitCode, BatchTaskContainerExecutionInfo containerInfo, BatchTaskFailureInfo failureInfo, int retryCount, DateTimeOffset? lastRetryTime, int requeueCount, DateTimeOffset? lastRequeueTime, BatchTaskExecutionResult? result, IDictionary serializedAdditionalRawData) + { + StartTime = startTime; + EndTime = endTime; + ExitCode = exitCode; + ContainerInfo = containerInfo; + FailureInfo = failureInfo; + RetryCount = retryCount; + LastRetryTime = lastRetryTime; + RequeueCount = requeueCount; + LastRequeueTime = lastRequeueTime; + Result = result; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchTaskExecutionInfo() + { + } + + /// The time at which the Task started running. 'Running' corresponds to the running state, so if the Task specifies resource files or Packages, then the start time reflects the time at which the Task started downloading or deploying these. If the Task has been restarted or retried, this is the most recent time at which the Task started running. This property is present only for Tasks that are in the running or completed state. + public DateTimeOffset? StartTime { get; set; } + /// The time at which the Task completed. 
This property is set only if the Task is in the Completed state.
+        public DateTimeOffset? EndTime { get; set; }
+        /// The exit code of the program specified on the Task command line. This property is set only if the Task is in the completed state. In general, the exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. However, if the Batch service terminates the Task (due to timeout, or user termination via the API) you may see an operating system-defined exit code.
+        public int? ExitCode { get; set; }
+        /// Information about the container under which the Task is executing. This property is set only if the Task runs in a container context.
+        public BatchTaskContainerExecutionInfo ContainerInfo { get; set; }
+        /// Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure.
+        public BatchTaskFailureInfo FailureInfo { get; set; }
+        /// The number of times the Task has been retried by the Batch service. Task application failures (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file upload errors are not retried. The Batch service will retry the Task up to the limit specified by the constraints.
+        public int RetryCount { get; set; }
+        /// The most recent time at which a retry of the Task started running. This element is present only if the Task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the Task has been restarted for reasons other than retry; for example, if the Compute Node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not.
+        public DateTimeOffset? LastRetryTime { get; set; }
+        /// The number of times the Task has been requeued by the Batch service as the result of a user request. When the user removes Compute Nodes from a Pool (by resizing/shrinking the pool) or when the Job is being disabled, the user can specify that running Tasks on the Compute Nodes be requeued for execution. This count tracks how many times the Task has been requeued for these reasons.
+        public int RequeueCount { get; set; }
+        /// The most recent time at which the Task has been requeued by the Batch service as the result of a user request. This property is set only if the requeueCount is nonzero.
+        public DateTimeOffset? LastRequeueTime { get; set; }
+        /// The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property.
+        public BatchTaskExecutionResult? Result { get; set; }
+    }
+}
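Downstream code typically branches on these fields once a task completes. A reading sketch (illustrative only; how the task and its ExecutionInfo are fetched is outside this excerpt):

    BatchTaskExecutionInfo info = task.ExecutionInfo; // 'task' is assumed to be a retrieved task object
    if (info.Result == BatchTaskExecutionResult.Failure)
    {
        Console.WriteLine($"Failed after {info.RetryCount} retries: {info.FailureInfo?.Message}");
    }
    else if (info.ExitCode is int code)
    {
        Console.WriteLine($"Exited with code {code} at {info.EndTime}");
    }

diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskExecutionResult.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskExecutionResult.cs
new file mode 100644
index 0000000000000..d8214b52fbc1e
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskExecutionResult.cs
@@ -0,0 +1,51 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.ComponentModel;
+
+namespace Azure.Compute.Batch
+{
+    /// BatchTaskExecutionResult enums.
+    public readonly partial struct BatchTaskExecutionResult : IEquatable<BatchTaskExecutionResult>
+    {
+        private readonly string _value;
+
+        /// Initializes a new instance of .
+        /// is null.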
+ public BatchTaskExecutionResult(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string SuccessValue = "success"; + private const string FailureValue = "failure"; + + /// The Task ran successfully. + public static BatchTaskExecutionResult Success { get; } = new BatchTaskExecutionResult(SuccessValue); + /// There was an error during processing of the Task. The failure may have occurred before the Task process was launched, while the Task process was executing, or after the Task process exited. + public static BatchTaskExecutionResult Failure { get; } = new BatchTaskExecutionResult(FailureValue); + /// Determines if two values are the same. + public static bool operator ==(BatchTaskExecutionResult left, BatchTaskExecutionResult right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(BatchTaskExecutionResult left, BatchTaskExecutionResult right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator BatchTaskExecutionResult(string value) => new BatchTaskExecutionResult(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is BatchTaskExecutionResult other && Equals(other); + /// + public bool Equals(BatchTaskExecutionResult other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskFailureInfo.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskFailureInfo.Serialization.cs new file mode 100644 index 0000000000000..0dac8ee17edde --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskFailureInfo.Serialization.cs @@ -0,0 +1,182 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchTaskFailureInfo : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskFailureInfo)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("category"u8); + writer.WriteStringValue(Category.ToString()); + if (Optional.IsDefined(Code)) + { + writer.WritePropertyName("code"u8); + writer.WriteStringValue(Code); + } + if (Optional.IsDefined(Message)) + { + writer.WritePropertyName("message"u8); + writer.WriteStringValue(Message); + } + if (Optional.IsCollectionDefined(Details)) + { + writer.WritePropertyName("details"u8); + writer.WriteStartArray(); + foreach (var item in Details) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchTaskFailureInfo IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskFailureInfo)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchTaskFailureInfo(document.RootElement, options); + } + + internal static BatchTaskFailureInfo DeserializeBatchTaskFailureInfo(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + ErrorCategory category = default; + string code = default; + string message = default; + IList details = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("category"u8)) + { + category = new ErrorCategory(property.Value.GetString()); + continue; + } + if (property.NameEquals("code"u8)) + { + code = property.Value.GetString(); + continue; + } + if (property.NameEquals("message"u8)) + { + message = property.Value.GetString(); + continue; + } + if (property.NameEquals("details"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(NameValuePair.DeserializeNameValuePair(item, options)); + } + details = array; + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchTaskFailureInfo(category, code, message, details ?? new ChangeTrackingList(), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchTaskFailureInfo)} does not support writing '{options.Format}' format."); + } + } + + BatchTaskFailureInfo IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchTaskFailureInfo(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchTaskFailureInfo)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchTaskFailureInfo FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchTaskFailureInfo(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskFailureInfo.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskFailureInfo.cs new file mode 100644 index 0000000000000..5215629e4e572 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskFailureInfo.cs @@ -0,0 +1,85 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Information about a Task failure. + public partial class BatchTaskFailureInfo + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The category of the Task error. + public BatchTaskFailureInfo(ErrorCategory category) + { + Category = category; + Details = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// The category of the Task error. + /// An identifier for the Task error. Codes are invariant and are intended to be consumed programmatically. + /// A message describing the Task error, intended to be suitable for display in a user interface. + /// A list of additional details related to the error. + /// Keeps track of any properties unknown to the library. 
+ internal BatchTaskFailureInfo(ErrorCategory category, string code, string message, IList details, IDictionary serializedAdditionalRawData) + { + Category = category; + Code = code; + Message = message; + Details = details; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchTaskFailureInfo() + { + } + + /// The category of the Task error. + public ErrorCategory Category { get; set; } + /// An identifier for the Task error. Codes are invariant and are intended to be consumed programmatically. + public string Code { get; set; } + /// A message describing the Task error, intended to be suitable for display in a user interface. + public string Message { get; set; } + /// A list of additional details related to the error. + public IList Details { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskGroup.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskGroup.Serialization.cs new file mode 100644 index 0000000000000..e6a413291e88b --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskGroup.Serialization.cs @@ -0,0 +1,145 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchTaskGroup : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskGroup)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("value"u8); + writer.WriteStartArray(); + foreach (var item in Value) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchTaskGroup IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskGroup)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchTaskGroup(document.RootElement, options); + } + + internal static BatchTaskGroup DeserializeBatchTaskGroup(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + IList value = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("value"u8)) + { + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(BatchTaskCreateContent.DeserializeBatchTaskCreateContent(item, options)); + } + value = array; + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchTaskGroup(value, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchTaskGroup)} does not support writing '{options.Format}' format."); + } + } + + BatchTaskGroup IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchTaskGroup(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchTaskGroup)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchTaskGroup FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchTaskGroup(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskGroup.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskGroup.cs new file mode 100644 index 0000000000000..952152f88850e --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskGroup.cs @@ -0,0 +1,76 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; +using System.Linq; + +namespace Azure.Compute.Batch +{ + /// A collection of Azure Batch Tasks to add. + public partial class BatchTaskGroup + { + /// + /// Keeps track of any properties unknown to the library. 
+        ///
+        /// To assign an object to the value of this property use .
+        ///
+        ///
+        /// To assign an already formatted json string to this property use .
+        ///
+        ///
+        /// Examples:
+        ///
+        ///
+        /// BinaryData.FromObjectAsJson("foo")
+        /// Creates a payload of "foo".
+        ///
+        ///
+        /// BinaryData.FromString("\"foo\"")
+        /// Creates a payload of "foo".
+        ///
+        ///
+        /// BinaryData.FromObjectAsJson(new { key = "value" })
+        /// Creates a payload of { "key": "value" }.
+        ///
+        ///
+        /// BinaryData.FromString("{\"key\": \"value\"}")
+        /// Creates a payload of { "key": "value" }.
+        ///
+        ///
+        ///
+        private IDictionary<string, BinaryData> _serializedAdditionalRawData;
+
+        /// Initializes a new instance of .
+        /// The collection of Tasks to add. The maximum count of Tasks is 100. The total serialized size of this collection must be less than 1MB. If it is greater than 1MB (for example if each Task has hundreds of resource files or environment variables), the request will fail with code 'RequestBodyTooLarge' and should be retried with fewer Tasks.
+        /// is null.
+        public BatchTaskGroup(IEnumerable<BatchTaskCreateContent> value)
+        {
+            Argument.AssertNotNull(value, nameof(value));
+
+            Value = value.ToList();
+        }
+
+        /// Initializes a new instance of .
+        /// The collection of Tasks to add. The maximum count of Tasks is 100. The total serialized size of this collection must be less than 1MB. If it is greater than 1MB (for example if each Task has hundreds of resource files or environment variables), the request will fail with code 'RequestBodyTooLarge' and should be retried with fewer Tasks.
+        /// Keeps track of any properties unknown to the library.
+        internal BatchTaskGroup(IList<BatchTaskCreateContent> value, IDictionary<string, BinaryData> serializedAdditionalRawData)
+        {
+            Value = value;
+            _serializedAdditionalRawData = serializedAdditionalRawData;
+        }
+
+        /// Initializes a new instance of for deserialization.
+        internal BatchTaskGroup()
+        {
+        }
+
+        /// The collection of Tasks to add. The maximum count of Tasks is 100. The total serialized size of this collection must be less than 1MB. If it is greater than 1MB (for example if each Task has hundreds of resource files or environment variables), the request will fail with code 'RequestBodyTooLarge' and should be retried with fewer Tasks.
+        public IList<BatchTaskCreateContent> Value { get; }
+    }
+}
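A BatchTaskGroup is just the request body for one add-task-collection call. A construction sketch (not part of the change; assumes the BatchTaskCreateContent(id, commandLine) constructor from earlier in this diff and a using directive for System.Linq):

    // Sketch: stage up to 100 tasks for a single collection submission.
    var tasks = Enumerable.Range(0, 10)
        .Select(i => new BatchTaskCreateContent($"frame-{i}", $"render.exe --frame {i}")) // ctor shape assumed
        .ToList();
    var group = new BatchTaskGroup(tasks); // throws ArgumentNullException when tasks is null

diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskIdRange.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskIdRange.Serialization.cs
new file mode 100644
index 0000000000000..939c14d5d8396
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskIdRange.Serialization.cs
@@ -0,0 +1,143 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.ClientModel.Primitives;
+using System.Collections.Generic;
+using System.Text.Json;
+using Azure.Core;
+
+namespace Azure.Compute.Batch
+{
+    public partial class BatchTaskIdRange : IUtf8JsonSerializable, IJsonModel<BatchTaskIdRange>
+    {
+        void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel<BatchTaskIdRange>)this).Write(writer, ModelSerializationExtensions.WireOptions);
+
+        void IJsonModel<BatchTaskIdRange>.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ?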
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskIdRange)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("start"u8); + writer.WriteNumberValue(Start); + writer.WritePropertyName("end"u8); + writer.WriteNumberValue(End); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchTaskIdRange IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskIdRange)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchTaskIdRange(document.RootElement, options); + } + + internal static BatchTaskIdRange DeserializeBatchTaskIdRange(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + int start = default; + int end = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("start"u8)) + { + start = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("end"u8)) + { + end = property.Value.GetInt32(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchTaskIdRange(start, end, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchTaskIdRange)} does not support writing '{options.Format}' format."); + } + } + + BatchTaskIdRange IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchTaskIdRange(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchTaskIdRange)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. 
+ internal static BatchTaskIdRange FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchTaskIdRange(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskIdRange.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskIdRange.cs new file mode 100644 index 0000000000000..c0eb5a78bc418 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskIdRange.cs @@ -0,0 +1,81 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// + /// The start and end of the range are inclusive. For example, if a range has start + /// 9 and end 12, then it represents Tasks '9', '10', '11' and '12'. + /// + public partial class BatchTaskIdRange + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The first Task ID in the range. + /// The last Task ID in the range. + public BatchTaskIdRange(int start, int end) + { + Start = start; + End = end; + } + + /// Initializes a new instance of . + /// The first Task ID in the range. + /// The last Task ID in the range. + /// Keeps track of any properties unknown to the library. + internal BatchTaskIdRange(int start, int end, IDictionary serializedAdditionalRawData) + { + Start = start; + End = end; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchTaskIdRange() + { + } + + /// The first Task ID in the range. + public int Start { get; set; } + /// The last Task ID in the range. + public int End { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskInfo.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskInfo.Serialization.cs new file mode 100644 index 0000000000000..ecc2b0cc77622 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskInfo.Serialization.cs @@ -0,0 +1,205 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchTaskInfo : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskInfo)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(TaskUrl)) + { + writer.WritePropertyName("taskUrl"u8); + writer.WriteStringValue(TaskUrl); + } + if (Optional.IsDefined(JobId)) + { + writer.WritePropertyName("jobId"u8); + writer.WriteStringValue(JobId); + } + if (Optional.IsDefined(TaskId)) + { + writer.WritePropertyName("taskId"u8); + writer.WriteStringValue(TaskId); + } + if (Optional.IsDefined(SubtaskId)) + { + writer.WritePropertyName("subtaskId"u8); + writer.WriteNumberValue(SubtaskId.Value); + } + writer.WritePropertyName("taskState"u8); + writer.WriteStringValue(TaskState.ToString()); + if (Optional.IsDefined(ExecutionInfo)) + { + writer.WritePropertyName("executionInfo"u8); + writer.WriteObjectValue(ExecutionInfo, options); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchTaskInfo IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskInfo)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchTaskInfo(document.RootElement, options); + } + + internal static BatchTaskInfo DeserializeBatchTaskInfo(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string taskUrl = default; + string jobId = default; + string taskId = default; + int? 
subtaskId = default;
+            BatchTaskState taskState = default;
+            BatchTaskExecutionInfo executionInfo = default;
+            IDictionary<string, BinaryData> serializedAdditionalRawData = default;
+            Dictionary<string, BinaryData> rawDataDictionary = new Dictionary<string, BinaryData>();
+            foreach (var property in element.EnumerateObject())
+            {
+                if (property.NameEquals("taskUrl"u8))
+                {
+                    taskUrl = property.Value.GetString();
+                    continue;
+                }
+                if (property.NameEquals("jobId"u8))
+                {
+                    jobId = property.Value.GetString();
+                    continue;
+                }
+                if (property.NameEquals("taskId"u8))
+                {
+                    taskId = property.Value.GetString();
+                    continue;
+                }
+                if (property.NameEquals("subtaskId"u8))
+                {
+                    if (property.Value.ValueKind == JsonValueKind.Null)
+                    {
+                        continue;
+                    }
+                    subtaskId = property.Value.GetInt32();
+                    continue;
+                }
+                if (property.NameEquals("taskState"u8))
+                {
+                    taskState = new BatchTaskState(property.Value.GetString());
+                    continue;
+                }
+                if (property.NameEquals("executionInfo"u8))
+                {
+                    if (property.Value.ValueKind == JsonValueKind.Null)
+                    {
+                        continue;
+                    }
+                    executionInfo = BatchTaskExecutionInfo.DeserializeBatchTaskExecutionInfo(property.Value, options);
+                    continue;
+                }
+                if (options.Format != "W")
+                {
+                    rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText()));
+                }
+            }
+            serializedAdditionalRawData = rawDataDictionary;
+            return new BatchTaskInfo(
+                taskUrl,
+                jobId,
+                taskId,
+                subtaskId,
+                taskState,
+                executionInfo,
+                serializedAdditionalRawData);
+        }
+
+        BinaryData IPersistableModel<BatchTaskInfo>.Write(ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ? ((IPersistableModel<BatchTaskInfo>)this).GetFormatFromOptions(options) : options.Format;
+
+            switch (format)
+            {
+                case "J":
+                    return ModelReaderWriter.Write(this, options);
+                default:
+                    throw new FormatException($"The model {nameof(BatchTaskInfo)} does not support writing '{options.Format}' format.");
+            }
+        }
+
+        BatchTaskInfo IPersistableModel<BatchTaskInfo>.Create(BinaryData data, ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ? ((IPersistableModel<BatchTaskInfo>)this).GetFormatFromOptions(options) : options.Format;
+
+            switch (format)
+            {
+                case "J":
+                    {
+                        using JsonDocument document = JsonDocument.Parse(data);
+                        return DeserializeBatchTaskInfo(document.RootElement, options);
+                    }
+                default:
+                    throw new FormatException($"The model {nameof(BatchTaskInfo)} does not support reading '{options.Format}' format.");
+            }
+        }
+
+        string IPersistableModel<BatchTaskInfo>.GetFormatFromOptions(ModelReaderWriterOptions options) => "J";
+
+        /// Deserializes the model from a raw response.
+        /// The response to deserialize the model from.
+        internal static BatchTaskInfo FromResponse(Response response)
+        {
+            using var document = JsonDocument.Parse(response.Content);
+            return DeserializeBatchTaskInfo(document.RootElement);
+        }
+
+        /// Convert into a RequestContent.
+        internal virtual RequestContent ToRequestContent()
+        {
+            var content = new Utf8JsonRequestContent();
+            content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions);
+            return content;
+        }
+    }
+}
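A consumption sketch for the read-only BatchTaskInfo model defined next (illustrative only; 'recentTasks' stands in for a recent-tasks listing on a Compute Node, which is outside this excerpt):

    foreach (BatchTaskInfo taskInfo in recentTasks) // source of 'recentTasks' is assumed
    {
        Console.WriteLine($"{taskInfo.JobId}/{taskInfo.TaskId}: {taskInfo.TaskState}");
        if (taskInfo.ExecutionInfo?.Result is BatchTaskExecutionResult result)
        {
            Console.WriteLine($"  completed with result: {result}");
        }
    }

diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskInfo.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskInfo.cs
new file mode 100644
index 0000000000000..1302e69a62a5f
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskInfo.cs
@@ -0,0 +1,92 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.Collections.Generic;
+
+namespace Azure.Compute.Batch
+{
+    /// Information about a Task running on a Compute Node.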
+ public partial class BatchTaskInfo + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The current state of the Task. + internal BatchTaskInfo(BatchTaskState taskState) + { + TaskState = taskState; + } + + /// Initializes a new instance of . + /// The URL of the Task. + /// The ID of the Job to which the Task belongs. + /// The ID of the Task. + /// The ID of the subtask if the Task is a multi-instance Task. + /// The current state of the Task. + /// Information about the execution of the Task. + /// Keeps track of any properties unknown to the library. + internal BatchTaskInfo(string taskUrl, string jobId, string taskId, int? subtaskId, BatchTaskState taskState, BatchTaskExecutionInfo executionInfo, IDictionary serializedAdditionalRawData) + { + TaskUrl = taskUrl; + JobId = jobId; + TaskId = taskId; + SubtaskId = subtaskId; + TaskState = taskState; + ExecutionInfo = executionInfo; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchTaskInfo() + { + } + + /// The URL of the Task. + public string TaskUrl { get; } + /// The ID of the Job to which the Task belongs. + public string JobId { get; } + /// The ID of the Task. + public string TaskId { get; } + /// The ID of the subtask if the Task is a multi-instance Task. + public int? SubtaskId { get; } + /// The current state of the Task. + public BatchTaskState TaskState { get; } + /// Information about the execution of the Task. + public BatchTaskExecutionInfo ExecutionInfo { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskSchedulingPolicy.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskSchedulingPolicy.Serialization.cs new file mode 100644 index 0000000000000..418c684f12401 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskSchedulingPolicy.Serialization.cs @@ -0,0 +1,135 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchTaskSchedulingPolicy : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskSchedulingPolicy)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("nodeFillType"u8); + writer.WriteStringValue(NodeFillType.ToString()); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchTaskSchedulingPolicy IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskSchedulingPolicy)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchTaskSchedulingPolicy(document.RootElement, options); + } + + internal static BatchTaskSchedulingPolicy DeserializeBatchTaskSchedulingPolicy(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + BatchNodeFillType nodeFillType = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("nodeFillType"u8)) + { + nodeFillType = new BatchNodeFillType(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchTaskSchedulingPolicy(nodeFillType, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchTaskSchedulingPolicy)} does not support writing '{options.Format}' format."); + } + } + + BatchTaskSchedulingPolicy IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchTaskSchedulingPolicy(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchTaskSchedulingPolicy)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. 
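// Editorial sketch, not part of the generated file: constructing the model this
// serializer targets. Only "spread" is named in this diff (as the default fill type);
// "pack" — fill each node with tasks before using the next — is an assumption based
// on the Batch service's documented fill types.

using System;
using Azure.Compute.Batch;

var policy = new BatchTaskSchedulingPolicy(new BatchNodeFillType("pack"));
Console.WriteLine(policy.NodeFillType); // extensible enum: prints "pack"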
+ internal static BatchTaskSchedulingPolicy FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchTaskSchedulingPolicy(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskSchedulingPolicy.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskSchedulingPolicy.cs new file mode 100644 index 0000000000000..40078de0754f6 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskSchedulingPolicy.cs @@ -0,0 +1,72 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Specifies how Tasks should be distributed across Compute Nodes. + public partial class BatchTaskSchedulingPolicy + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. + public BatchTaskSchedulingPolicy(BatchNodeFillType nodeFillType) + { + NodeFillType = nodeFillType; + } + + /// Initializes a new instance of . + /// How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. + /// Keeps track of any properties unknown to the library. + internal BatchTaskSchedulingPolicy(BatchNodeFillType nodeFillType, IDictionary serializedAdditionalRawData) + { + NodeFillType = nodeFillType; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchTaskSchedulingPolicy() + { + } + + /// How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. + public BatchNodeFillType NodeFillType { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskSlotCounts.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskSlotCounts.Serialization.cs new file mode 100644 index 0000000000000..0ff4c5b62c720 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskSlotCounts.Serialization.cs @@ -0,0 +1,173 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class BatchTaskSlotCounts : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskSlotCounts)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("active"u8); + writer.WriteNumberValue(Active); + writer.WritePropertyName("running"u8); + writer.WriteNumberValue(Running); + writer.WritePropertyName("completed"u8); + writer.WriteNumberValue(Completed); + writer.WritePropertyName("succeeded"u8); + writer.WriteNumberValue(Succeeded); + writer.WritePropertyName("failed"u8); + writer.WriteNumberValue(Failed); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchTaskSlotCounts IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskSlotCounts)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchTaskSlotCounts(document.RootElement, options); + } + + internal static BatchTaskSlotCounts DeserializeBatchTaskSlotCounts(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + int active = default; + int running = default; + int completed = default; + int succeeded = default; + int failed = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("active"u8)) + { + active = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("running"u8)) + { + running = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("completed"u8)) + { + completed = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("succeeded"u8)) + { + succeeded = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("failed"u8)) + { + failed = property.Value.GetInt32(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchTaskSlotCounts( + active, + running, + completed, + succeeded, + failed, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var 
format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchTaskSlotCounts)} does not support writing '{options.Format}' format."); + } + } + + BatchTaskSlotCounts IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchTaskSlotCounts(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchTaskSlotCounts)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchTaskSlotCounts FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchTaskSlotCounts(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskSlotCounts.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskSlotCounts.cs new file mode 100644 index 0000000000000..f413a24cb7410 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskSlotCounts.cs @@ -0,0 +1,96 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// The TaskSlot counts for a Job. + public partial class BatchTaskSlotCounts + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The number of TaskSlots for active Tasks. + /// The number of TaskSlots for running Tasks. + /// The number of TaskSlots for completed Tasks. + /// The number of TaskSlots for succeeded Tasks. + /// The number of TaskSlots for failed Tasks. + internal BatchTaskSlotCounts(int active, int running, int completed, int succeeded, int failed) + { + Active = active; + Running = running; + Completed = completed; + Succeeded = succeeded; + Failed = failed; + } + + /// Initializes a new instance of . + /// The number of TaskSlots for active Tasks. + /// The number of TaskSlots for running Tasks. + /// The number of TaskSlots for completed Tasks. 
+ /// The number of TaskSlots for succeeded Tasks. + /// The number of TaskSlots for failed Tasks. + /// Keeps track of any properties unknown to the library. + internal BatchTaskSlotCounts(int active, int running, int completed, int succeeded, int failed, IDictionary serializedAdditionalRawData) + { + Active = active; + Running = running; + Completed = completed; + Succeeded = succeeded; + Failed = failed; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchTaskSlotCounts() + { + } + + /// The number of TaskSlots for active Tasks. + public int Active { get; } + /// The number of TaskSlots for running Tasks. + public int Running { get; } + /// The number of TaskSlots for completed Tasks. + public int Completed { get; } + /// The number of TaskSlots for succeeded Tasks. + public int Succeeded { get; } + /// The number of TaskSlots for failed Tasks. + public int Failed { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskState.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskState.cs new file mode 100644 index 0000000000000..5398b1ef91b53 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskState.cs @@ -0,0 +1,57 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// BatchTaskState enums. + public readonly partial struct BatchTaskState : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public BatchTaskState(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ActiveValue = "active"; + private const string PreparingValue = "preparing"; + private const string RunningValue = "running"; + private const string CompletedValue = "completed"; + + /// The Task is queued and able to run, but is not currently assigned to a Compute Node. A Task enters this state when it is created, when it is enabled after being disabled, or when it is awaiting a retry after a failed run. + public static BatchTaskState Active { get; } = new BatchTaskState(ActiveValue); + /// The Task has been assigned to a Compute Node, but is waiting for a required Job Preparation Task to complete on the Compute Node. If the Job Preparation Task succeeds, the Task will move to running. If the Job Preparation Task fails, the Task will return to active and will be eligible to be assigned to a different Compute Node. + public static BatchTaskState Preparing { get; } = new BatchTaskState(PreparingValue); + /// The Task is running on a Compute Node. This includes task-level preparation such as downloading resource files or deploying Packages specified on the Task - it does not necessarily mean that the Task command line has started executing. + public static BatchTaskState Running { get; } = new BatchTaskState(RunningValue); + /// The Task is no longer eligible to run, usually because the Task has finished successfully, or the Task has finished unsuccessfully and has exhausted its retry limit. A Task is also marked as completed if an error occurred launching the Task, or when the Task has been terminated. + public static BatchTaskState Completed { get; } = new BatchTaskState(CompletedValue); + /// Determines if two values are the same. 
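// Editorial sketch, not part of the generated file: BatchTaskState is an extensible
// enum, so values the service adds later still deserialize, strings convert
// implicitly (operator defined just below), and comparison is case-insensitive.

using Azure.Compute.Batch;

BatchTaskState state = "COMPLETED";            // implicit string conversion
bool done = state == BatchTaskState.Completed; // true: Equals uses InvariantCultureIgnoreCase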
+        public static bool operator ==(BatchTaskState left, BatchTaskState right) => left.Equals(right);
+        /// Determines if two values are not the same.
+        public static bool operator !=(BatchTaskState left, BatchTaskState right) => !left.Equals(right);
+        /// Converts a string to a <see cref="BatchTaskState"/>.
+        public static implicit operator BatchTaskState(string value) => new BatchTaskState(value);
+
+        /// <inheritdoc/>
+        [EditorBrowsable(EditorBrowsableState.Never)]
+        public override bool Equals(object obj) => obj is BatchTaskState other && Equals(other);
+        /// <inheritdoc/>
+        public bool Equals(BatchTaskState other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase);
+
+        /// <inheritdoc/>
+        [EditorBrowsable(EditorBrowsableState.Never)]
+        public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0;
+        /// <inheritdoc/>
+        public override string ToString() => _value;
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskStatistics.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskStatistics.Serialization.cs
new file mode 100644
index 0000000000000..550e7bb1eefef
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskStatistics.Serialization.cs
@@ -0,0 +1,227 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.ClientModel.Primitives;
+using System.Collections.Generic;
+using System.Text.Json;
+using Azure.Core;
+
+namespace Azure.Compute.Batch
+{
+    public partial class BatchTaskStatistics : IUtf8JsonSerializable, IJsonModel<BatchTaskStatistics>
+    {
+        void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel<BatchTaskStatistics>)this).Write(writer, ModelSerializationExtensions.WireOptions);
+
+        void IJsonModel<BatchTaskStatistics>.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ?
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskStatistics)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("url"u8); + writer.WriteStringValue(Url); + writer.WritePropertyName("startTime"u8); + writer.WriteStringValue(StartTime, "O"); + writer.WritePropertyName("lastUpdateTime"u8); + writer.WriteStringValue(LastUpdateTime, "O"); + writer.WritePropertyName("userCPUTime"u8); + writer.WriteStringValue(UserCpuTime, "P"); + writer.WritePropertyName("kernelCPUTime"u8); + writer.WriteStringValue(KernelCpuTime, "P"); + writer.WritePropertyName("wallClockTime"u8); + writer.WriteStringValue(WallClockTime, "P"); + writer.WritePropertyName("readIOps"u8); + writer.WriteNumberValue(ReadIOps); + writer.WritePropertyName("writeIOps"u8); + writer.WriteNumberValue(WriteIOps); + writer.WritePropertyName("readIOGiB"u8); + writer.WriteNumberValue(ReadIOGiB); + writer.WritePropertyName("writeIOGiB"u8); + writer.WriteNumberValue(WriteIOGiB); + writer.WritePropertyName("waitTime"u8); + writer.WriteStringValue(WaitTime, "P"); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + BatchTaskStatistics IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(BatchTaskStatistics)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeBatchTaskStatistics(document.RootElement, options); + } + + internal static BatchTaskStatistics DeserializeBatchTaskStatistics(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string url = default; + DateTimeOffset startTime = default; + DateTimeOffset lastUpdateTime = default; + TimeSpan userCPUTime = default; + TimeSpan kernelCPUTime = default; + TimeSpan wallClockTime = default; + long readIOps = default; + long writeIOps = default; + float readIOGiB = default; + float writeIOGiB = default; + TimeSpan waitTime = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("url"u8)) + { + url = property.Value.GetString(); + continue; + } + if (property.NameEquals("startTime"u8)) + { + startTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("lastUpdateTime"u8)) + { + lastUpdateTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("userCPUTime"u8)) + { + userCPUTime = property.Value.GetTimeSpan("P"); + continue; + } + if (property.NameEquals("kernelCPUTime"u8)) + { + kernelCPUTime = property.Value.GetTimeSpan("P"); + continue; + } + if (property.NameEquals("wallClockTime"u8)) + { + wallClockTime = 
property.Value.GetTimeSpan("P"); + continue; + } + if (property.NameEquals("readIOps"u8)) + { + readIOps = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("writeIOps"u8)) + { + writeIOps = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("readIOGiB"u8)) + { + readIOGiB = property.Value.GetSingle(); + continue; + } + if (property.NameEquals("writeIOGiB"u8)) + { + writeIOGiB = property.Value.GetSingle(); + continue; + } + if (property.NameEquals("waitTime"u8)) + { + waitTime = property.Value.GetTimeSpan("P"); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new BatchTaskStatistics( + url, + startTime, + lastUpdateTime, + userCPUTime, + kernelCPUTime, + wallClockTime, + readIOps, + writeIOps, + readIOGiB, + writeIOGiB, + waitTime, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(BatchTaskStatistics)} does not support writing '{options.Format}' format."); + } + } + + BatchTaskStatistics IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeBatchTaskStatistics(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(BatchTaskStatistics)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static BatchTaskStatistics FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeBatchTaskStatistics(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskStatistics.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskStatistics.cs new file mode 100644 index 0000000000000..d3c072e4c5e2a --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskStatistics.cs @@ -0,0 +1,135 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Resource usage statistics for a Task. + public partial class BatchTaskStatistics + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". 
+ /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The URL of the statistics. + /// The start time of the time range covered by the statistics. + /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. + /// The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by the Task. + /// The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by the Task. + /// The total wall clock time of the Task. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If the Task was retried, this includes the wall clock time of all the Task retries. + /// The total number of disk read operations made by the Task. + /// The total number of disk write operations made by the Task. + /// The total gibibytes read from disk by the Task. + /// The total gibibytes written to disk by the Task. + /// The total wait time of the Task. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.). + /// is null. + public BatchTaskStatistics(string url, DateTimeOffset startTime, DateTimeOffset lastUpdateTime, TimeSpan userCpuTime, TimeSpan kernelCpuTime, TimeSpan wallClockTime, long readIOps, long writeIOps, float readIOGiB, float writeIOGiB, TimeSpan waitTime) + { + Argument.AssertNotNull(url, nameof(url)); + + Url = url; + StartTime = startTime; + LastUpdateTime = lastUpdateTime; + UserCpuTime = userCpuTime; + KernelCpuTime = kernelCpuTime; + WallClockTime = wallClockTime; + ReadIOps = readIOps; + WriteIOps = writeIOps; + ReadIOGiB = readIOGiB; + WriteIOGiB = writeIOGiB; + WaitTime = waitTime; + } + + /// Initializes a new instance of . + /// The URL of the statistics. + /// The start time of the time range covered by the statistics. + /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. + /// The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by the Task. + /// The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by the Task. + /// The total wall clock time of the Task. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If the Task was retried, this includes the wall clock time of all the Task retries. + /// The total number of disk read operations made by the Task. + /// The total number of disk write operations made by the Task. + /// The total gibibytes read from disk by the Task. + /// The total gibibytes written to disk by the Task. + /// The total wait time of the Task. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. 
(If the Task is retried due to failures, the wait time is the time to the most recent Task execution.). + /// Keeps track of any properties unknown to the library. + internal BatchTaskStatistics(string url, DateTimeOffset startTime, DateTimeOffset lastUpdateTime, TimeSpan userCpuTime, TimeSpan kernelCpuTime, TimeSpan wallClockTime, long readIOps, long writeIOps, float readIOGiB, float writeIOGiB, TimeSpan waitTime, IDictionary serializedAdditionalRawData) + { + Url = url; + StartTime = startTime; + LastUpdateTime = lastUpdateTime; + UserCpuTime = userCpuTime; + KernelCpuTime = kernelCpuTime; + WallClockTime = wallClockTime; + ReadIOps = readIOps; + WriteIOps = writeIOps; + ReadIOGiB = readIOGiB; + WriteIOGiB = writeIOGiB; + WaitTime = waitTime; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal BatchTaskStatistics() + { + } + + /// The URL of the statistics. + public string Url { get; set; } + /// The start time of the time range covered by the statistics. + public DateTimeOffset StartTime { get; set; } + /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. + public DateTimeOffset LastUpdateTime { get; set; } + /// The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by the Task. + public TimeSpan UserCpuTime { get; set; } + /// The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by the Task. + public TimeSpan KernelCpuTime { get; set; } + /// The total wall clock time of the Task. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If the Task was retried, this includes the wall clock time of all the Task retries. + public TimeSpan WallClockTime { get; set; } + /// The total number of disk read operations made by the Task. + public long ReadIOps { get; set; } + /// The total number of disk write operations made by the Task. + public long WriteIOps { get; set; } + /// The total gibibytes read from disk by the Task. + public float ReadIOGiB { get; set; } + /// The total gibibytes written to disk by the Task. + public float WriteIOGiB { get; set; } + /// The total wait time of the Task. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.). + public TimeSpan WaitTime { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/CachingType.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/CachingType.cs new file mode 100644 index 0000000000000..019a860e6a45a --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/CachingType.cs @@ -0,0 +1,54 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// CachingType enums. + public readonly partial struct CachingType : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public CachingType(string value) + { + _value = value ?? 
throw new ArgumentNullException(nameof(value));
+        }
+
+        private const string NoneValue = "none";
+        private const string ReadOnlyValue = "readonly";
+        private const string ReadWriteValue = "readwrite";
+
+        /// The caching mode for the disk is not enabled.
+        public static CachingType None { get; } = new CachingType(NoneValue);
+        /// The caching mode for the disk is read only.
+        public static CachingType ReadOnly { get; } = new CachingType(ReadOnlyValue);
+        /// The caching mode for the disk is read and write.
+        public static CachingType ReadWrite { get; } = new CachingType(ReadWriteValue);
+        /// Determines if two values are the same.
+        public static bool operator ==(CachingType left, CachingType right) => left.Equals(right);
+        /// Determines if two values are not the same.
+        public static bool operator !=(CachingType left, CachingType right) => !left.Equals(right);
+        /// Converts a string to a <see cref="CachingType"/>.
+        public static implicit operator CachingType(string value) => new CachingType(value);
+
+        /// <inheritdoc/>
+        [EditorBrowsable(EditorBrowsableState.Never)]
+        public override bool Equals(object obj) => obj is CachingType other && Equals(other);
+        /// <inheritdoc/>
+        public bool Equals(CachingType other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase);
+
+        /// <inheritdoc/>
+        [EditorBrowsable(EditorBrowsableState.Never)]
+        public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0;
+        /// <inheritdoc/>
+        public override string ToString() => _value;
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/CifsMountConfiguration.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/CifsMountConfiguration.Serialization.cs
new file mode 100644
index 0000000000000..e7c24edf8cc06
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/src/Generated/CifsMountConfiguration.Serialization.cs
@@ -0,0 +1,176 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.ClientModel.Primitives;
+using System.Collections.Generic;
+using System.Text.Json;
+using Azure.Core;
+
+namespace Azure.Compute.Batch
+{
+    public partial class CifsMountConfiguration : IUtf8JsonSerializable, IJsonModel<CifsMountConfiguration>
+    {
+        void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel<CifsMountConfiguration>)this).Write(writer, ModelSerializationExtensions.WireOptions);
+
+        void IJsonModel<CifsMountConfiguration>.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ?
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CifsMountConfiguration)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("username"u8); + writer.WriteStringValue(Username); + writer.WritePropertyName("source"u8); + writer.WriteStringValue(Source); + writer.WritePropertyName("relativeMountPath"u8); + writer.WriteStringValue(RelativeMountPath); + if (Optional.IsDefined(MountOptions)) + { + writer.WritePropertyName("mountOptions"u8); + writer.WriteStringValue(MountOptions); + } + writer.WritePropertyName("password"u8); + writer.WriteStringValue(Password); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + CifsMountConfiguration IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(CifsMountConfiguration)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeCifsMountConfiguration(document.RootElement, options); + } + + internal static CifsMountConfiguration DeserializeCifsMountConfiguration(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string username = default; + string source = default; + string relativeMountPath = default; + string mountOptions = default; + string password = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("username"u8)) + { + username = property.Value.GetString(); + continue; + } + if (property.NameEquals("source"u8)) + { + source = property.Value.GetString(); + continue; + } + if (property.NameEquals("relativeMountPath"u8)) + { + relativeMountPath = property.Value.GetString(); + continue; + } + if (property.NameEquals("mountOptions"u8)) + { + mountOptions = property.Value.GetString(); + continue; + } + if (property.NameEquals("password"u8)) + { + password = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new CifsMountConfiguration( + username, + source, + relativeMountPath, + mountOptions, + password, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(CifsMountConfiguration)} does not support writing '{options.Format}' format."); + } + } + + CifsMountConfiguration IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeCifsMountConfiguration(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(CifsMountConfiguration)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static CifsMountConfiguration FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeCifsMountConfiguration(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/CifsMountConfiguration.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/CifsMountConfiguration.cs new file mode 100644 index 0000000000000..e980e98448462 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/CifsMountConfiguration.cs @@ -0,0 +1,100 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Information used to connect to a CIFS file system. + public partial class CifsMountConfiguration + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The user to use for authentication against the CIFS file system. + /// The URI of the file system to mount. + /// The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. + /// The password to use for authentication against the CIFS file system. + /// , , or is null. 
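// A hedged construction sketch, not part of the generated file: all four constructor
// arguments are required (see the null checks in the constructor that follows); the
// share URI, credentials, and mount options are illustrative values only.

using System;
using Azure.Compute.Batch;

var cifs = new CifsMountConfiguration(
    username: "batchuser",
    source: "//account.file.core.windows.net/share",
    relativeMountPath: "data", // mounted under AZ_BATCH_NODE_MOUNTS_DIR/data
    password: Environment.GetEnvironmentVariable("CIFS_PASSWORD"))
{
    MountOptions = "vers=3.0,dir_mode=0777,file_mode=0777" // typical Linux 'mount' options
};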
+ public CifsMountConfiguration(string username, string source, string relativeMountPath, string password) + { + Argument.AssertNotNull(username, nameof(username)); + Argument.AssertNotNull(source, nameof(source)); + Argument.AssertNotNull(relativeMountPath, nameof(relativeMountPath)); + Argument.AssertNotNull(password, nameof(password)); + + Username = username; + Source = source; + RelativeMountPath = relativeMountPath; + Password = password; + } + + /// Initializes a new instance of . + /// The user to use for authentication against the CIFS file system. + /// The URI of the file system to mount. + /// The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. + /// Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux. + /// The password to use for authentication against the CIFS file system. + /// Keeps track of any properties unknown to the library. + internal CifsMountConfiguration(string username, string source, string relativeMountPath, string mountOptions, string password, IDictionary serializedAdditionalRawData) + { + Username = username; + Source = source; + RelativeMountPath = relativeMountPath; + MountOptions = mountOptions; + Password = password; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal CifsMountConfiguration() + { + } + + /// The user to use for authentication against the CIFS file system. + public string Username { get; set; } + /// The URI of the file system to mount. + public string Source { get; set; } + /// The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. + public string RelativeMountPath { get; set; } + /// Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux. + public string MountOptions { get; set; } + /// The password to use for authentication against the CIFS file system. + public string Password { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ComputeBatchClientBuilderExtensions.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ComputeBatchClientBuilderExtensions.cs new file mode 100644 index 0000000000000..982156ace9ac0 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ComputeBatchClientBuilderExtensions.cs @@ -0,0 +1,35 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using Azure.Compute.Batch; +using Azure.Core.Extensions; + +namespace Microsoft.Extensions.Azure +{ + /// Extension methods to add to client builder. + public static partial class ComputeBatchClientBuilderExtensions + { + /// Registers a instance. + /// The builder to register with. + /// Batch account endpoint (for example: https://batchaccount.eastus2.batch.azure.com). + public static IAzureClientBuilder AddBatchClient(this TBuilder builder, Uri endpoint) + where TBuilder : IAzureClientFactoryBuilderWithCredential + { + return builder.RegisterClientFactory((options, cred) => new BatchClient(endpoint, cred, options)); + } + + /// Registers a instance. + /// The builder to register with. 
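// Illustrative-only sketch, not part of the generated file: registering the client
// through Microsoft.Extensions.Azure in an ASP.NET Core app. 'builder' is assumed to
// be the app's WebApplicationBuilder; the endpoint echoes the doc comment above, and
// DefaultAzureCredential comes from Azure.Identity.

using System;
using Azure.Identity;
using Microsoft.Extensions.Azure;

builder.Services.AddAzureClients(clients =>
{
    clients.AddBatchClient(new Uri("https://batchaccount.eastus2.batch.azure.com"));
    clients.UseCredential(new DefaultAzureCredential());
});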
+ /// The configuration values. + public static IAzureClientBuilder AddBatchClient(this TBuilder builder, TConfiguration configuration) + where TBuilder : IAzureClientFactoryBuilderWithConfiguration + { + return builder.RegisterClientFactory(configuration); + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ComputeBatchModelFactory.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ComputeBatchModelFactory.cs new file mode 100644 index 0000000000000..78fd6bcbc4076 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ComputeBatchModelFactory.cs @@ -0,0 +1,1168 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; +using System.Linq; + +namespace Azure.Compute.Batch +{ + /// Model factory for models. + public static partial class ComputeBatchModelFactory + { + /// Initializes a new instance of . + /// A string that uniquely identifies the application within the Account. + /// The display name for the application. + /// The list of available versions of the application. + /// A new instance for mocking. + public static BatchApplication BatchApplication(string id = null, string displayName = null, IEnumerable versions = null) + { + versions ??= new List(); + + return new BatchApplication(id, displayName, versions?.ToList(), serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The ID of the Pool whose metrics are aggregated in this entry. + /// The start time of the aggregation interval covered by this entry. + /// The end time of the aggregation interval covered by this entry. + /// The size of virtual machines in the Pool. All VMs in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + /// The total core hours used in the Pool during this aggregation interval. + /// A new instance for mocking. + public static BatchPoolUsageMetrics BatchPoolUsageMetrics(string poolId = null, DateTimeOffset startTime = default, DateTimeOffset endTime = default, string vmSize = null, float totalCoreHours = default) + { + return new BatchPoolUsageMetrics( + poolId, + startTime, + endTime, + vmSize, + totalCoreHours, + serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// A string that uniquely identifies the Pool within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two Pool IDs within an Account that differ only by case). + /// The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. + /// The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available VM sizes for Pools using Images from the Virtual Machines Marketplace (pools created with virtualMachineConfiguration), see Sizes for Virtual Machines (Linux) (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) or Sizes for Virtual Machines (Windows) (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). 
Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). + /// The virtual machine configuration for the Pool. This property must be specified. + /// The timeout for allocation of Compute Nodes to the Pool. This timeout applies only to manual scaling; it has no effect when enableAutoScale is set to true. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + /// The user-specified tags associated with the pool. The user-defined tags to be associated with the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources associated with the pool. This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'. + /// The desired number of dedicated Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. + /// The desired number of Spot/Low-priority Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to true. If enableAutoScale is set to false, then you must set either targetDedicatedNodes, targetLowPriorityNodes, or both. + /// Whether the Pool size should automatically adjust over time. If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula property is required and the Pool automatically resizes according to the formula. The default value is false. + /// A formula for the desired number of Compute Nodes in the Pool. This property must not be specified if enableAutoScale is set to false. It is required if enableAutoScale is set to true. The formula is checked for validity before the Pool is created. If the formula is not valid, the Batch service rejects the request with detailed error information. For more information about specifying this formula, see 'Automatically scale Compute Nodes in an Azure Batch Pool' (https://azure.microsoft.com/documentation/articles/batch-automatic-scaling/). + /// The time interval at which to automatically adjust the Pool size according to the autoscale formula. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + /// Whether the Pool permits direct communication between Compute Nodes. Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false. + /// The network configuration for the Pool. + /// A Task specified to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. + /// The list of Packages to be installed on each Compute Node in the Pool. 
When creating a pool, the package's application ID must be fully qualified (/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}). Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool. + /// The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256. + /// How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. + /// The list of user Accounts to be created on each Compute Node in the Pool. + /// A list of name-value pairs associated with the Pool as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. + /// Mount storage using specified file system for the entire lifetime of the pool. Mount the storage using Azure fileshare, NFS, CIFS or Blobfuse based file system. + /// The desired node communication mode for the pool. If omitted, the default value is Default. + /// The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. + /// A new instance for mocking. + public static BatchPoolCreateContent BatchPoolCreateContent(string id = null, string displayName = null, string vmSize = null, VirtualMachineConfiguration virtualMachineConfiguration = null, TimeSpan? resizeTimeout = null, IDictionary resourceTags = null, int? targetDedicatedNodes = null, int? targetLowPriorityNodes = null, bool? enableAutoScale = null, string autoScaleFormula = null, TimeSpan? autoScaleEvaluationInterval = null, bool? enableInterNodeCommunication = null, NetworkConfiguration networkConfiguration = null, BatchStartTask startTask = null, IEnumerable applicationPackageReferences = null, int? taskSlotsPerNode = null, BatchTaskSchedulingPolicy taskSchedulingPolicy = null, IEnumerable userAccounts = null, IEnumerable metadata = null, IEnumerable mountConfiguration = null, BatchNodeCommunicationMode? targetNodeCommunicationMode = null, UpgradePolicy upgradePolicy = null) + { + resourceTags ??= new Dictionary(); + applicationPackageReferences ??= new List(); + userAccounts ??= new List(); + metadata ??= new List(); + mountConfiguration ??= new List(); + + return new BatchPoolCreateContent( + id, + displayName, + vmSize, + virtualMachineConfiguration, + resizeTimeout, + resourceTags, + targetDedicatedNodes, + targetLowPriorityNodes, + enableAutoScale, + autoScaleFormula, + autoScaleEvaluationInterval, + enableInterNodeCommunication, + networkConfiguration, + startTask, + applicationPackageReferences?.ToList(), + taskSlotsPerNode, + taskSchedulingPolicy, + userAccounts?.ToList(), + metadata?.ToList(), + mountConfiguration?.ToList(), + targetNodeCommunicationMode, + upgradePolicy, + serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The publisher of the Azure Virtual Machines Marketplace Image. For example, Canonical or MicrosoftWindowsServer. + /// The offer type of the Azure Virtual Machines Marketplace Image. For example, UbuntuServer or WindowsServer. + /// The SKU of the Azure Virtual Machines Marketplace Image. For example, 18.04-LTS or 2019-Datacenter. + /// The version of the Azure Virtual Machines Marketplace Image. 
A value of 'latest' can be specified to select the latest version of an Image. If omitted, the default is 'latest'.
+ /// The ARM resource identifier of the Azure Compute Gallery Image. Compute Nodes in the Pool will be created using this Image Id. This is of the form /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} or /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} for always defaulting to the latest image version. This property is mutually exclusive with other ImageReference properties. The Azure Compute Gallery Image must have replicas in the same region and must be in the same subscription as the Azure Batch account. If the image version is not specified in the imageId, the latest version will be used. For information about the firewall settings for the Batch Compute Node agent to communicate with the Batch service see https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration.
+ /// The specific version of the platform image or marketplace image used to create the node. This read-only field differs from 'version' only if the value specified for 'version' when the pool was created was 'latest'.
+ /// A new instance for mocking.
+ public static ImageReference ImageReference(string publisher = null, string offer = null, string sku = null, string version = null, string virtualMachineImageId = null, string exactVersion = null)
+ {
+ return new ImageReference(
+ publisher,
+ offer,
+ sku,
+ version,
+ virtualMachineImageId,
+ exactVersion,
+ serializedAdditionalRawData: null);
+ }
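+
+ // Hedged usage sketch: factory methods like the one above exist so tests can fabricate
+ // service models without calling the live Batch service. The containing class name
+ // (ComputeBatchModelFactory) and all argument values below are assumptions for
+ // illustration only, not part of the generated code:
+ //
+ //   ImageReference image = ComputeBatchModelFactory.ImageReference(
+ //       publisher: "canonical",
+ //       offer: "0001-com-ubuntu-server-jammy",
+ //       sku: "22_04-lts",
+ //       version: "latest");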
+
+ /// Initializes a new instance of .
+ /// A string that uniquely identifies the Pool within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case).
+ /// The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024.
+ /// The URL of the Pool.
+ /// The ETag of the Pool. This is an opaque string. You can use it to detect whether the Pool has changed between requests. In particular, you can pass the ETag when updating a Pool to specify that your changes should take effect only if nobody else has modified the Pool in the meantime.
+ /// The last modified time of the Pool. This is the last time at which the Pool level data, such as the targetDedicatedNodes or enableAutoscale settings, changed. It does not factor in node-level changes such as a Compute Node changing state.
+ /// The creation time of the Pool.
+ /// The current state of the Pool.
+ /// The time at which the Pool entered its current state.
+ /// Whether the Pool is resizing.
+ /// The time at which the Pool entered its current allocation state.
+ /// The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes).
+ /// The virtual machine configuration for the Pool. This property must be specified.
+ /// The timeout for allocation of Compute Nodes to the Pool. This is the timeout for the most recent resize operation. (The initial sizing when the Pool is created counts as a resize.) The default value is 15 minutes.
+ /// A list of errors encountered while performing the last resize on the Pool. This property is set only if one or more errors occurred during the last Pool resize, and only when the Pool allocationState is Steady.
+ /// The user-specified tags associated with the pool. The user-defined tags to be associated with the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources associated with the pool. This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'.
+ /// The number of dedicated Compute Nodes currently in the Pool.
+ /// The number of Spot/Low-priority Compute Nodes currently in the Pool. Spot/Low-priority Compute Nodes which have been preempted are included in this count.
+ /// The desired number of dedicated Compute Nodes in the Pool.
+ /// The desired number of Spot/Low-priority Compute Nodes in the Pool.
+ /// Whether the Pool size should automatically adjust over time. If false, at least one of targetDedicatedNodes and targetLowPriorityNodes must be specified. If true, the autoScaleFormula property is required and the Pool automatically resizes according to the formula. The default value is false.
+ /// A formula for the desired number of Compute Nodes in the Pool. This property is set only if the Pool automatically scales, i.e. enableAutoScale is true.
+ /// The time interval at which to automatically adjust the Pool size according to the autoscale formula. This property is set only if the Pool automatically scales, i.e. enableAutoScale is true.
+ /// The results and errors from the last execution of the autoscale formula. This property is set only if the Pool automatically scales, i.e. enableAutoScale is true.
+ /// Whether the Pool permits direct communication between Compute Nodes. This imposes restrictions on which Compute Nodes can be assigned to the Pool. Specifying this value can reduce the chance of the requested number of Compute Nodes being allocated in the Pool.
+ /// The network configuration for the Pool.
+ /// A Task specified to run on each Compute Node as it joins the Pool.
+ /// The list of Packages to be installed on each Compute Node in the Pool. Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Package references on any given Pool.
+ /// The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256.
+ /// How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread.
+ /// The list of user Accounts to be created on each Compute Node in the Pool.
+ /// A list of name-value pairs associated with the Pool as metadata.
+ /// Utilization and resource usage statistics for the entire lifetime of the Pool. This property is populated only if the CloudPool was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes.
+ /// A list of file systems to mount on each node in the pool.
This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. + /// The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. + /// The desired node communication mode for the pool. If omitted, the default value is Default. + /// The current state of the pool communication mode. + /// The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. + /// A new instance for mocking. + public static BatchPool BatchPool(string id = null, string displayName = null, string url = null, string eTag = null, DateTimeOffset? lastModified = null, DateTimeOffset? creationTime = null, BatchPoolState? state = null, DateTimeOffset? stateTransitionTime = null, AllocationState? allocationState = null, DateTimeOffset? allocationStateTransitionTime = null, string vmSize = null, VirtualMachineConfiguration virtualMachineConfiguration = null, TimeSpan? resizeTimeout = null, IEnumerable resizeErrors = null, IReadOnlyDictionary resourceTags = null, int? currentDedicatedNodes = null, int? currentLowPriorityNodes = null, int? targetDedicatedNodes = null, int? targetLowPriorityNodes = null, bool? enableAutoScale = null, string autoScaleFormula = null, TimeSpan? autoScaleEvaluationInterval = null, AutoScaleRun autoScaleRun = null, bool? enableInterNodeCommunication = null, NetworkConfiguration networkConfiguration = null, BatchStartTask startTask = null, IEnumerable applicationPackageReferences = null, int? taskSlotsPerNode = null, BatchTaskSchedulingPolicy taskSchedulingPolicy = null, IEnumerable userAccounts = null, IEnumerable metadata = null, BatchPoolStatistics stats = null, IEnumerable mountConfiguration = null, BatchPoolIdentity identity = null, BatchNodeCommunicationMode? targetNodeCommunicationMode = null, BatchNodeCommunicationMode? currentNodeCommunicationMode = null, UpgradePolicy upgradePolicy = null) + { + resizeErrors ??= new List(); + resourceTags ??= new Dictionary(); + applicationPackageReferences ??= new List(); + userAccounts ??= new List(); + metadata ??= new List(); + mountConfiguration ??= new List(); + + return new BatchPool( + id, + displayName, + url, + eTag, + lastModified, + creationTime, + state, + stateTransitionTime, + allocationState, + allocationStateTransitionTime, + vmSize, + virtualMachineConfiguration, + resizeTimeout, + resizeErrors?.ToList(), + resourceTags, + currentDedicatedNodes, + currentLowPriorityNodes, + targetDedicatedNodes, + targetLowPriorityNodes, + enableAutoScale, + autoScaleFormula, + autoScaleEvaluationInterval, + autoScaleRun, + enableInterNodeCommunication, + networkConfiguration, + startTask, + applicationPackageReferences?.ToList(), + taskSlotsPerNode, + taskSchedulingPolicy, + userAccounts?.ToList(), + metadata?.ToList(), + stats, + mountConfiguration?.ToList(), + identity, + targetNodeCommunicationMode, + currentNodeCommunicationMode, + upgradePolicy, + serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// An identifier for the Pool resize error. Codes are invariant and are intended to be consumed programmatically. + /// A message describing the Pool resize error, intended to be suitable for display in a user interface. + /// A list of additional error details related to the Pool resize error. + /// A new instance for mocking. 
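+ /// Example (a hedged sketch; the containing factory class name and all values here are assumed for illustration, not generated):
+ /// ResizeError resizeError = ComputeBatchModelFactory.ResizeError(
+ ///     code: "AccountCoreQuotaReached",
+ ///     message: "The account does not have enough cores to satisfy the requested resize.");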
+ public static ResizeError ResizeError(string code = null, string message = null, IEnumerable values = null) + { + values ??= new List(); + + return new ResizeError(code, message, values?.ToList(), serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The time at which the autoscale formula was last evaluated. + /// The final values of all variables used in the evaluation of the autoscale formula. Each variable value is returned in the form $variable=value, and variables are separated by semicolons. + /// Details of the error encountered evaluating the autoscale formula on the Pool, if the evaluation was unsuccessful. + /// A new instance for mocking. + public static AutoScaleRun AutoScaleRun(DateTimeOffset timestamp = default, string results = null, AutoScaleRunError error = null) + { + return new AutoScaleRun(timestamp, results, error, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// An identifier for the autoscale error. Codes are invariant and are intended to be consumed programmatically. + /// A message describing the autoscale error, intended to be suitable for display in a user interface. + /// A list of additional error details related to the autoscale error. + /// A new instance for mocking. + public static AutoScaleRunError AutoScaleRunError(string code = null, string message = null, IEnumerable values = null) + { + values ??= new List(); + + return new AutoScaleRunError(code, message, values?.ToList(), serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The URL for the statistics. + /// The start time of the time range covered by the statistics. + /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. + /// Statistics related to Pool usage, such as the amount of core-time used. + /// Statistics related to resource consumption by Compute Nodes in the Pool. + /// A new instance for mocking. + public static BatchPoolStatistics BatchPoolStatistics(string url = null, DateTimeOffset startTime = default, DateTimeOffset lastUpdateTime = default, BatchPoolUsageStatistics usageStats = null, BatchPoolResourceStatistics resourceStats = null) + { + return new BatchPoolStatistics( + url, + startTime, + lastUpdateTime, + usageStats, + resourceStats, + serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The start time of the time range covered by the statistics. + /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. + /// The aggregated wall-clock time of the dedicated Compute Node cores being part of the Pool. + /// A new instance for mocking. + public static BatchPoolUsageStatistics BatchPoolUsageStatistics(DateTimeOffset startTime = default, DateTimeOffset lastUpdateTime = default, TimeSpan dedicatedCoreTime = default) + { + return new BatchPoolUsageStatistics(startTime, lastUpdateTime, dedicatedCoreTime, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The start time of the time range covered by the statistics. + /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. + /// The average CPU usage across all Compute Nodes in the Pool (percentage per node). + /// The average memory usage in GiB across all Compute Nodes in the Pool. 
+ /// The peak memory usage in GiB across all Compute Nodes in the Pool. + /// The average used disk space in GiB across all Compute Nodes in the Pool. + /// The peak used disk space in GiB across all Compute Nodes in the Pool. + /// The total number of disk read operations across all Compute Nodes in the Pool. + /// The total number of disk write operations across all Compute Nodes in the Pool. + /// The total amount of data in GiB of disk reads across all Compute Nodes in the Pool. + /// The total amount of data in GiB of disk writes across all Compute Nodes in the Pool. + /// The total amount of data in GiB of network reads across all Compute Nodes in the Pool. + /// The total amount of data in GiB of network writes across all Compute Nodes in the Pool. + /// A new instance for mocking. + public static BatchPoolResourceStatistics BatchPoolResourceStatistics(DateTimeOffset startTime = default, DateTimeOffset lastUpdateTime = default, float avgCpuPercentage = default, float avgMemoryGiB = default, float peakMemoryGiB = default, float avgDiskGiB = default, float peakDiskGiB = default, long diskReadIOps = default, long diskWriteIOps = default, float diskReadGiB = default, float diskWriteGiB = default, float networkReadGiB = default, float networkWriteGiB = default) + { + return new BatchPoolResourceStatistics( + startTime, + lastUpdateTime, + avgCpuPercentage, + avgMemoryGiB, + peakMemoryGiB, + avgDiskGiB, + peakDiskGiB, + diskReadIOps, + diskWriteIOps, + diskReadGiB, + diskWriteGiB, + networkReadGiB, + networkWriteGiB, + serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. + /// The list of user identities associated with the Batch account. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. + /// A new instance for mocking. + public static BatchPoolIdentity BatchPoolIdentity(BatchPoolIdentityType type = default, IEnumerable userAssignedIdentities = null) + { + userAssignedIdentities ??= new List(); + + return new BatchPoolIdentity(type, userAssignedIdentities?.ToList(), serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The ARM resource id of the user assigned identity. + /// The client id of the user assigned identity. + /// The principal id of the user assigned identity. + /// A new instance for mocking. + public static UserAssignedIdentity UserAssignedIdentity(string resourceId = null, string clientId = null, string principalId = null) + { + return new UserAssignedIdentity(resourceId, clientId, principalId, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The ID of the Compute Node agent SKU which the Image supports. + /// The reference to the Azure Virtual Machine's Marketplace Image. + /// The type of operating system (e.g. Windows or Linux) of the Image. + /// The capabilities or features which the Image supports. Not every capability of the Image is listed. 
Capabilities in this list are considered of special interest and are generally related to integration with other features in the Azure Batch service. + /// The time when the Azure Batch service will stop accepting create Pool requests for the Image. + /// Whether the Azure Batch service actively verifies that the Image is compatible with the associated Compute Node agent SKU. + /// A new instance for mocking. + public static BatchSupportedImage BatchSupportedImage(string nodeAgentSkuId = null, ImageReference imageReference = null, OSType osType = default, IEnumerable capabilities = null, DateTimeOffset? batchSupportEndOfLife = null, ImageVerificationType verificationType = default) + { + capabilities ??= new List(); + + return new BatchSupportedImage( + nodeAgentSkuId, + imageReference, + osType, + capabilities?.ToList(), + batchSupportEndOfLife, + verificationType, + serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The ID of the Pool. + /// The number of dedicated Compute Nodes in each state. + /// The number of Spot/Low-priority Compute Nodes in each state. + /// A new instance for mocking. + public static BatchPoolNodeCounts BatchPoolNodeCounts(string poolId = null, BatchNodeCounts dedicated = null, BatchNodeCounts lowPriority = null) + { + return new BatchPoolNodeCounts(poolId, dedicated, lowPriority, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The number of Compute Nodes in the creating state. + /// The number of Compute Nodes in the idle state. + /// The number of Compute Nodes in the offline state. + /// The number of Compute Nodes in the preempted state. + /// The count of Compute Nodes in the rebooting state. + /// The number of Compute Nodes in the reimaging state. + /// The number of Compute Nodes in the running state. + /// The number of Compute Nodes in the starting state. + /// The number of Compute Nodes in the startTaskFailed state. + /// The number of Compute Nodes in the leavingPool state. + /// The number of Compute Nodes in the unknown state. + /// The number of Compute Nodes in the unusable state. + /// The number of Compute Nodes in the waitingForStartTask state. + /// The total number of Compute Nodes. + /// The number of Compute Nodes in the upgradingOS state. + /// A new instance for mocking. + public static BatchNodeCounts BatchNodeCounts(int creating = default, int idle = default, int offline = default, int preempted = default, int rebooting = default, int reimaging = default, int running = default, int starting = default, int startTaskFailed = default, int leavingPool = default, int unknown = default, int unusable = default, int waitingForStartTask = default, int total = default, int upgradingOs = default) + { + return new BatchNodeCounts( + creating, + idle, + offline, + preempted, + rebooting, + reimaging, + running, + starting, + startTaskFailed, + leavingPool, + unknown, + unusable, + waitingForStartTask, + total, + upgradingOs, + serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// A string that uniquely identifies the Job within the Account. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). + /// The display name for the Job. + /// Whether Tasks in the Job can define dependencies on each other. The default is false. + /// The URL of the Job. + /// The ETag of the Job. This is an opaque string. You can use it to detect whether the Job has changed between requests. 
In particular, you can pass the ETag when updating a Job to specify that your changes should take effect only if nobody else has modified the Job in the meantime.
+ /// The last modified time of the Job. This is the last time at which the Job level data, such as the Job state or priority, changed. It does not factor in task-level changes such as adding new Tasks or Tasks changing state.
+ /// The creation time of the Job.
+ /// The current state of the Job.
+ /// The time at which the Job entered its current state.
+ /// The previous state of the Job. This property is not set if the Job is in its initial Active state.
+ /// The time at which the Job entered its previous state. This property is not set if the Job is in its initial Active state.
+ /// The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0.
+ /// Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able to requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API.
+ /// The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API.
+ /// The execution constraints for the Job.
+ /// Details of a Job Manager Task to be launched when the Job is started.
+ /// The Job Preparation Task. The Job Preparation Task is a special Task run on each Compute Node before any other Task of the Job.
+ /// The Job Release Task. The Job Release Task is a special Task run at the end of the Job on each Compute Node that has run any other Task of the Job.
+ /// The list of common environment variable settings. These environment variables are set for all Tasks in the Job (including the Job Manager, Job Preparation and Job Release Tasks). Individual Tasks can override an environment setting specified here by specifying the same setting name with a different value.
+ /// The Pool settings associated with the Job.
+ /// The action the Batch service should take when all Tasks in the Job are in the completed state. The default is noaction.
+ /// The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if it has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction.
+ /// The network configuration for the Job.
+ /// A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code.
+ /// The execution information for the Job.
+ /// Resource usage statistics for the entire lifetime of the Job. This property is populated only if the CloudJob was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes.
+ /// A new instance for mocking.
+ public static BatchJob BatchJob(string id = null, string displayName = null, bool? usesTaskDependencies = null, string url = null, string eTag = null, DateTimeOffset? lastModified = null, DateTimeOffset? creationTime = null, BatchJobState? state = null, DateTimeOffset? stateTransitionTime = null, BatchJobState? previousState = null, DateTimeOffset? previousStateTransitionTime = null, int? priority = null, bool? allowTaskPreemption = null, int? maxParallelTasks = null, BatchJobConstraints constraints = null, BatchJobManagerTask jobManagerTask = null, BatchJobPreparationTask jobPreparationTask = null, BatchJobReleaseTask jobReleaseTask = null, IEnumerable commonEnvironmentSettings = null, BatchPoolInfo poolInfo = null, OnAllBatchTasksComplete? onAllTasksComplete = null, OnBatchTaskFailure? onTaskFailure = null, BatchJobNetworkConfiguration networkConfiguration = null, IEnumerable metadata = null, BatchJobExecutionInfo executionInfo = null, BatchJobStatistics stats = null)
+ {
+ commonEnvironmentSettings ??= new List();
+ metadata ??= new List();
+
+ return new BatchJob(
+ id,
+ displayName,
+ usesTaskDependencies,
+ url,
+ eTag,
+ lastModified,
+ creationTime,
+ state,
+ stateTransitionTime,
+ previousState,
+ previousStateTransitionTime,
+ priority,
+ allowTaskPreemption,
+ maxParallelTasks,
+ constraints,
+ jobManagerTask,
+ jobPreparationTask,
+ jobReleaseTask,
+ commonEnvironmentSettings?.ToList(),
+ poolInfo,
+ onAllTasksComplete,
+ onTaskFailure,
+ networkConfiguration,
+ metadata?.ToList(),
+ executionInfo,
+ stats,
+ serializedAdditionalRawData: null);
+ }
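+
+ // Hedged test-usage sketch (the factory class name and all values are assumed): read-only
+ // models such as BatchJob are normally produced only by the service, so unit tests can use
+ // this factory to stub a client response, for example:
+ //
+ //   BatchJob job = ComputeBatchModelFactory.BatchJob(
+ //       id: "job-1",
+ //       state: BatchJobState.Active,
+ //       priority: 0,
+ //       creationTime: DateTimeOffset.UtcNow);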
+
+ /// Initializes a new instance of .
+ /// A string that uniquely identifies the Job within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case).
+ /// The display name for the Job. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024.
+ /// Whether Tasks in the Job can define dependencies on each other. The default is false.
+ /// The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. The default value is 0.
+ /// Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able to requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API.
+ /// The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API.
+ /// The execution constraints for the Job.
+ /// Details of a Job Manager Task to be launched when the Job is started. If the Job does not specify a Job Manager Task, the user must explicitly add Tasks to the Job. If the Job does specify a Job Manager Task, the Batch service creates the Job Manager Task when the Job is created, and will try to schedule the Job Manager Task before scheduling other Tasks in the Job. The Job Manager Task's typical purpose is to control and/or monitor Job execution, for example by deciding what additional Tasks to run, determining when the work is complete, etc. (However, a Job Manager Task is not restricted to these activities - it is a fully-fledged Task in the system and can perform whatever actions are required for the Job.) For example, a Job Manager Task might download a file specified as a parameter, analyze the contents of that file and submit additional Tasks based on those contents.
+ /// The Job Preparation Task. If a Job has a Job Preparation Task, the Batch service will run the Job Preparation Task on a Node before starting any Tasks of that Job on that Compute Node.
+ /// The Job Release Task. A Job Release Task cannot be specified without also specifying a Job Preparation Task for the Job. The Batch service runs the Job Release Task on the Nodes that have run the Job Preparation Task. The primary purpose of the Job Release Task is to undo changes to Compute Nodes made by the Job Preparation Task. Example activities include deleting local files, or shutting down services that were started as part of Job preparation.
+ /// The list of common environment variable settings. These environment variables are set for all Tasks in the Job (including the Job Manager, Job Preparation and Job Release Tasks). Individual Tasks can override an environment setting specified here by specifying the same setting name with a different value.
+ /// The Pool on which the Batch service runs the Job's Tasks.
+ /// The action the Batch service should take when all Tasks in the Job are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction.
+ /// The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if it has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction.
+ /// The network configuration for the Job.
+ /// A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code.
+ /// A new instance for mocking.
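+ /// A hedged illustration (the factory class name, the settable BatchPoolInfo.PoolId property, and all values are assumed): a minimal create payload pairs an id with the Pool the Job should run on, for example
+ /// BatchJobCreateContent jobContent = ComputeBatchModelFactory.BatchJobCreateContent(id: "job-1", poolInfo: new BatchPoolInfo { PoolId = "pool-1" });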
+ public static BatchJobCreateContent BatchJobCreateContent(string id = null, string displayName = null, bool? usesTaskDependencies = null, int? priority = null, bool? allowTaskPreemption = null, int? maxParallelTasks = null, BatchJobConstraints constraints = null, BatchJobManagerTask jobManagerTask = null, BatchJobPreparationTask jobPreparationTask = null, BatchJobReleaseTask jobReleaseTask = null, IEnumerable commonEnvironmentSettings = null, BatchPoolInfo poolInfo = null, OnAllBatchTasksComplete? onAllTasksComplete = null, OnBatchTaskFailure? onTaskFailure = null, BatchJobNetworkConfiguration networkConfiguration = null, IEnumerable metadata = null)
+ {
+ commonEnvironmentSettings ??= new List();
+ metadata ??= new List();
+
+ return new BatchJobCreateContent(
+ id,
+ displayName,
+ usesTaskDependencies,
+ priority,
+ allowTaskPreemption,
+ maxParallelTasks,
+ constraints,
+ jobManagerTask,
+ jobPreparationTask,
+ jobReleaseTask,
+ commonEnvironmentSettings?.ToList(),
+ poolInfo,
+ onAllTasksComplete,
+ onTaskFailure,
+ networkConfiguration,
+ metadata?.ToList(),
+ serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ /// The ID of the Pool containing the Compute Node to which this entry refers.
+ /// The ID of the Compute Node to which this entry refers.
+ /// The URL of the Compute Node to which this entry refers.
+ /// Information about the execution status of the Job Preparation Task on this Compute Node.
+ /// Information about the execution status of the Job Release Task on this Compute Node. This property is set only if the Job Release Task has run on the Compute Node.
+ /// A new instance for mocking.
+ public static BatchJobPreparationAndReleaseTaskStatus BatchJobPreparationAndReleaseTaskStatus(string poolId = null, string nodeId = null, string nodeUrl = null, BatchJobPreparationTaskExecutionInfo jobPreparationTaskExecutionInfo = null, BatchJobReleaseTaskExecutionInfo jobReleaseTaskExecutionInfo = null)
+ {
+ return new BatchJobPreparationAndReleaseTaskStatus(
+ poolId,
+ nodeId,
+ nodeUrl,
+ jobPreparationTaskExecutionInfo,
+ jobReleaseTaskExecutionInfo,
+ serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ /// The time at which the Task started running. If the Task has been restarted or retried, this is the most recent time at which the Task started running.
+ /// The time at which the Job Preparation Task completed. This property is set only if the Task is in the Completed state.
+ /// The current state of the Job Preparation Task on the Compute Node.
+ /// The root directory of the Job Preparation Task on the Compute Node. You can use this path to retrieve files created by the Task, such as log files.
+ /// The URL to the root directory of the Job Preparation Task on the Compute Node.
+ /// The exit code of the program specified on the Task command line. This parameter is returned only if the Task is in the completed state. The exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. Note that the exit code may also be generated by the Compute Node operating system, such as when a process is forcibly terminated.
+ /// Information about the container under which the Task is executing. This property is set only if the Task runs in a container context.
+ /// Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure.
+ /// The number of times the Task has been retried by the Batch service. Task application failures (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file upload errors are not retried.
The Batch service will retry the Task up to the limit specified by the constraints. + /// The most recent time at which a retry of the Job Preparation Task started running. This property is set only if the Task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the Task has been restarted for reasons other than retry; for example, if the Compute Node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not. + /// The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. + /// A new instance for mocking. + public static BatchJobPreparationTaskExecutionInfo BatchJobPreparationTaskExecutionInfo(DateTimeOffset startTime = default, DateTimeOffset? endTime = null, BatchJobPreparationTaskState state = default, string taskRootDirectory = null, string taskRootDirectoryUrl = null, int? exitCode = null, BatchTaskContainerExecutionInfo containerInfo = null, BatchTaskFailureInfo failureInfo = null, int retryCount = default, DateTimeOffset? lastRetryTime = null, BatchTaskExecutionResult? result = null) + { + return new BatchJobPreparationTaskExecutionInfo( + startTime, + endTime, + state, + taskRootDirectory, + taskRootDirectoryUrl, + exitCode, + containerInfo, + failureInfo, + retryCount, + lastRetryTime, + result, + serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The time at which the Task started running. If the Task has been restarted or retried, this is the most recent time at which the Task started running. + /// The time at which the Job Release Task completed. This property is set only if the Task is in the Completed state. + /// The current state of the Job Release Task on the Compute Node. + /// The root directory of the Job Release Task on the Compute Node. You can use this path to retrieve files created by the Task, such as log files. + /// The URL to the root directory of the Job Release Task on the Compute Node. + /// The exit code of the program specified on the Task command line. This parameter is returned only if the Task is in the completed state. The exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. Note that the exit code may also be generated by the Compute Node operating system, such as when a process is forcibly terminated. + /// Information about the container under which the Task is executing. This property is set only if the Task runs in a container context. + /// Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. + /// The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. + /// A new instance for mocking. + public static BatchJobReleaseTaskExecutionInfo BatchJobReleaseTaskExecutionInfo(DateTimeOffset startTime = default, DateTimeOffset? endTime = null, BatchJobReleaseTaskState state = default, string taskRootDirectory = null, string taskRootDirectoryUrl = null, int? exitCode = null, BatchTaskContainerExecutionInfo containerInfo = null, BatchTaskFailureInfo failureInfo = null, BatchTaskExecutionResult? 
result = null)
+ {
+ return new BatchJobReleaseTaskExecutionInfo(
+ startTime,
+ endTime,
+ state,
+ taskRootDirectory,
+ taskRootDirectoryUrl,
+ exitCode,
+ containerInfo,
+ failureInfo,
+ result,
+ serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ /// The number of Tasks per state.
+ /// The number of TaskSlots required by Tasks per state.
+ /// A new instance for mocking.
+ public static BatchTaskCountsResult BatchTaskCountsResult(BatchTaskCounts taskCounts = null, BatchTaskSlotCounts taskSlotCounts = null)
+ {
+ return new BatchTaskCountsResult(taskCounts, taskSlotCounts, serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ /// The number of Tasks in the active state.
+ /// The number of Tasks in the running or preparing state.
+ /// The number of Tasks in the completed state.
+ /// The number of Tasks which succeeded. A Task succeeds if its result (found in the executionInfo property) is 'success'.
+ /// The number of Tasks which failed. A Task fails if its result (found in the executionInfo property) is 'failure'.
+ /// A new instance for mocking.
+ public static BatchTaskCounts BatchTaskCounts(int active = default, int running = default, int completed = default, int succeeded = default, int failed = default)
+ {
+ return new BatchTaskCounts(
+ active,
+ running,
+ completed,
+ succeeded,
+ failed,
+ serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ /// The number of TaskSlots for active Tasks.
+ /// The number of TaskSlots for running Tasks.
+ /// The number of TaskSlots for completed Tasks.
+ /// The number of TaskSlots for succeeded Tasks.
+ /// The number of TaskSlots for failed Tasks.
+ /// A new instance for mocking.
+ public static BatchTaskSlotCounts BatchTaskSlotCounts(int active = default, int running = default, int completed = default, int succeeded = default, int failed = default)
+ {
+ return new BatchTaskSlotCounts(
+ active,
+ running,
+ completed,
+ succeeded,
+ failed,
+ serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ /// A string that uniquely identifies the schedule within the Account.
+ /// The display name for the schedule.
+ /// The URL of the Job Schedule.
+ /// The ETag of the Job Schedule. This is an opaque string. You can use it to detect whether the Job Schedule has changed between requests. In particular, you can pass the ETag with an Update Job Schedule request to specify that your changes should take effect only if nobody else has modified the schedule in the meantime.
+ /// The last modified time of the Job Schedule. This is the last time at which the schedule level data, such as the Job specification or recurrence information, changed. It does not factor in job-level changes such as new Jobs being created or Jobs changing state.
+ /// The creation time of the Job Schedule.
+ /// The current state of the Job Schedule.
+ /// The time at which the Job Schedule entered the current state.
+ /// The previous state of the Job Schedule. This property is not present if the Job Schedule is in its initial active state.
+ /// The time at which the Job Schedule entered its previous state. This property is not present if the Job Schedule is in its initial active state.
+ /// The schedule according to which Jobs will be created. All times are fixed respective to UTC and are not impacted by daylight saving time.
+ /// The details of the Jobs to be created on this schedule.
+ /// Information about Jobs that have been and will be run under this schedule. + /// A list of name-value pairs associated with the schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. + /// The lifetime resource usage statistics for the Job Schedule. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. + /// A new instance for mocking. + public static BatchJobSchedule BatchJobSchedule(string id = null, string displayName = null, string url = null, string eTag = null, DateTimeOffset? lastModified = null, DateTimeOffset? creationTime = null, BatchJobScheduleState? state = null, DateTimeOffset? stateTransitionTime = null, BatchJobScheduleState? previousState = null, DateTimeOffset? previousStateTransitionTime = null, BatchJobScheduleConfiguration schedule = null, BatchJobSpecification jobSpecification = null, BatchJobScheduleExecutionInfo executionInfo = null, IEnumerable metadata = null, BatchJobScheduleStatistics stats = null) + { + metadata ??= new List(); + + return new BatchJobSchedule( + id, + displayName, + url, + eTag, + lastModified, + creationTime, + state, + stateTransitionTime, + previousState, + previousStateTransitionTime, + schedule, + jobSpecification, + executionInfo, + metadata?.ToList(), + stats, + serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// A string that uniquely identifies the schedule within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). + /// The display name for the schedule. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. + /// The schedule according to which Jobs will be created. All times are fixed respective to UTC and are not impacted by daylight saving time. + /// The details of the Jobs to be created on this schedule. + /// A list of name-value pairs associated with the schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. + /// A new instance for mocking. + public static BatchJobScheduleCreateContent BatchJobScheduleCreateContent(string id = null, string displayName = null, BatchJobScheduleConfiguration schedule = null, BatchJobSpecification jobSpecification = null, IEnumerable metadata = null) + { + metadata ??= new List(); + + return new BatchJobScheduleCreateContent( + id, + displayName, + schedule, + jobSpecification, + metadata?.ToList(), + serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// A string that uniquely identifies the Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within a Job that differ only by case). + /// A display name for the Task. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. + /// How the Batch service should respond when the Task completes. + /// The command line of the Task. 
For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables).
+ /// The settings for the container under which the Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files.
+ /// A list of files that the Batch service will download to the Compute Node before running the command line. For multi-instance Tasks, the resource files will only be downloaded to the Compute Node on which the primary Task is executed. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers.
+ /// A list of files that the Batch service will upload from the Compute Node after running the command line. For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed.
+ /// A list of environment variable settings for the Task.
+ /// A locality hint that can be used by the Batch service to select a Compute Node on which to start the new Task.
+ /// The execution constraints that apply to this Task. If you do not specify constraints, the maxTaskRetryCount is the maxTaskRetryCount specified for the Job, the maxWallClockTime is infinite, and the retentionTime is 7 days.
+ /// The number of scheduling slots that the Task requires to run. The default is 1. A Task can only be scheduled to run on a compute node if the node has enough free scheduling slots available. For multi-instance Tasks, this must be 1.
+ /// The user identity under which the Task runs. If omitted, the Task runs as a non-administrative user unique to the Task.
+ /// An object that indicates that the Task is a multi-instance Task, and contains information about how to run the multi-instance Task.
+ /// The Tasks that this Task depends on. This Task will not be scheduled until all Tasks that it depends on have completed successfully. If any of those Tasks fail and exhaust their retry counts, this Task will never be scheduled. If the Job does not have usesTaskDependencies set to true, and this element is present, the request fails with error code TaskDependenciesNotSpecifiedOnJob.
+ /// A list of Packages that the Batch service will deploy to the Compute Node before running the command line. Application packages are downloaded and deployed to a shared directory, not the Task working directory. Therefore, if a referenced package is already on the Node, and is up to date, then it is not re-downloaded; the existing copy on the Compute Node is used. If a referenced Package cannot be installed, for example because the package has been deleted or because download failed, the Task fails.
+ /// The settings for an authentication token that the Task can use to perform Batch service operations. If this property is set, the Batch service provides the Task with an authentication token which can be used to authenticate Batch service operations without requiring an Account access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out using the token depend on the settings. For example, a Task can request Job permissions in order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the Job.
+ /// A new instance for mocking.
+ public static BatchTaskCreateContent BatchTaskCreateContent(string id = null, string displayName = null, ExitConditions exitConditions = null, string commandLine = null, BatchTaskContainerSettings containerSettings = null, IEnumerable resourceFiles = null, IEnumerable outputFiles = null, IEnumerable environmentSettings = null, AffinityInfo affinityInfo = null, BatchTaskConstraints constraints = null, int? requiredSlots = null, UserIdentity userIdentity = null, MultiInstanceSettings multiInstanceSettings = null, BatchTaskDependencies dependsOn = null, IEnumerable applicationPackageReferences = null, AuthenticationTokenSettings authenticationTokenSettings = null)
+ {
+ resourceFiles ??= new List();
+ outputFiles ??= new List();
+ environmentSettings ??= new List();
+ applicationPackageReferences ??= new List();
+
+ return new BatchTaskCreateContent(
+ id,
+ displayName,
+ exitConditions,
+ commandLine,
+ containerSettings,
+ resourceFiles?.ToList(),
+ outputFiles?.ToList(),
+ environmentSettings?.ToList(),
+ affinityInfo,
+ constraints,
+ requiredSlots,
+ userIdentity,
+ multiInstanceSettings,
+ dependsOn,
+ applicationPackageReferences?.ToList(),
+ authenticationTokenSettings,
+ serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ /// A string that uniquely identifies the Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters.
+ /// A display name for the Task. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024.
+ /// The URL of the Task.
+ /// The ETag of the Task. This is an opaque string. You can use it to detect whether the Task has changed between requests. In particular, you can pass the ETag when updating a Task to specify that your changes should take effect only if nobody else has modified the Task in the meantime.
+ /// The last modified time of the Task.
+ /// The creation time of the Task.
+ /// How the Batch service should respond when the Task completes.
+ /// The current state of the Task.
+ /// The time at which the Task entered its current state.
+ /// The previous state of the Task. This property is not set if the Task is in its initial Active state.
+ /// The time at which the Task entered its previous state.
This property is not set if the Task is in its initial Active state. + /// The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables). + /// The settings for the container under which the Task runs. If the Pool that will run this Task has containerConfiguration set, this must be set as well. If the Pool that will run this Task doesn't have containerConfiguration set, this must not be set. When this is specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the node) are mapped into the container, all Task environment variables are mapped into the container, and the Task command line is executed in the container. Files produced in the container outside of AZ_BATCH_NODE_ROOT_DIR might not be reflected to the host disk, meaning that Batch file APIs will not be able to access those files. + /// A list of files that the Batch service will download to the Compute Node before running the command line. For multi-instance Tasks, the resource files will only be downloaded to the Compute Node on which the primary Task is executed. There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. + /// A list of files that the Batch service will upload from the Compute Node after running the command line. For multi-instance Tasks, the files will only be uploaded from the Compute Node on which the primary Task is executed. + /// A list of environment variable settings for the Task. + /// A locality hint that can be used by the Batch service to select a Compute Node on which to start the new Task. + /// The execution constraints that apply to this Task. + /// The number of scheduling slots that the Task requires to run. The default is 1. A Task can only be scheduled to run on a compute node if the node has enough free scheduling slots available. For multi-instance Tasks, this must be 1. + /// The user identity under which the Task runs. If omitted, the Task runs as a non-administrative user unique to the Task. + /// Information about the execution of the Task. + /// Information about the Compute Node on which the Task ran. + /// An object that indicates that the Task is a multi-instance Task, and contains information about how to run the multi-instance Task. + /// Resource usage statistics for the Task. + /// The Tasks that this Task depends on. This Task will not be scheduled until all Tasks that it depends on have completed successfully. If any of those Tasks fail and exhaust their retry counts, this Task will never be scheduled. 
+ /// A list of Packages that the Batch service will deploy to the Compute Node before running the command line. Application packages are downloaded and deployed to a shared directory, not the Task working directory. Therefore, if a referenced package is already on the Node, and is up to date, then it is not re-downloaded; the existing copy on the Compute Node is used. If a referenced Package cannot be installed, for example because the package has been deleted or because download failed, the Task fails. + /// The settings for an authentication token that the Task can use to perform Batch service operations. If this property is set, the Batch service provides the Task with an authentication token which can be used to authenticate Batch service operations without requiring an Account access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out using the token depend on the settings. For example, a Task can request Job permissions in order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the Job. + /// A new instance for mocking. + public static BatchTask BatchTask(string id = null, string displayName = null, string url = null, string eTag = null, DateTimeOffset? lastModified = null, DateTimeOffset? creationTime = null, ExitConditions exitConditions = null, BatchTaskState? state = null, DateTimeOffset? stateTransitionTime = null, BatchTaskState? previousState = null, DateTimeOffset? previousStateTransitionTime = null, string commandLine = null, BatchTaskContainerSettings containerSettings = null, IEnumerable resourceFiles = null, IEnumerable outputFiles = null, IEnumerable environmentSettings = null, AffinityInfo affinityInfo = null, BatchTaskConstraints constraints = null, int? requiredSlots = null, UserIdentity userIdentity = null, BatchTaskExecutionInfo executionInfo = null, BatchNodeInfo nodeInfo = null, MultiInstanceSettings multiInstanceSettings = null, BatchTaskStatistics stats = null, BatchTaskDependencies dependsOn = null, IEnumerable applicationPackageReferences = null, AuthenticationTokenSettings authenticationTokenSettings = null) + { + resourceFiles ??= new List(); + outputFiles ??= new List(); + environmentSettings ??= new List(); + applicationPackageReferences ??= new List(); + + return new BatchTask( + id, + displayName, + url, + eTag, + lastModified, + creationTime, + exitConditions, + state, + stateTransitionTime, + previousState, + previousStateTransitionTime, + commandLine, + containerSettings, + resourceFiles?.ToList(), + outputFiles?.ToList(), + environmentSettings?.ToList(), + affinityInfo, + constraints, + requiredSlots, + userIdentity, + executionInfo, + nodeInfo, + multiInstanceSettings, + stats, + dependsOn, + applicationPackageReferences?.ToList(), + authenticationTokenSettings, + serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The results of the add Task collection operation. + /// A new instance for mocking. + public static BatchTaskAddCollectionResult BatchTaskAddCollectionResult(IEnumerable value = null) + { + value ??= new List(); + + return new BatchTaskAddCollectionResult(value?.ToList(), serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The status of the add Task request. + /// The ID of the Task for which this is the result. + /// The ETag of the Task, if the Task was successfully added. You can use this to detect whether the Task has changed between requests. 
In particular, you can pass the ETag with an Update Task request to specify that your changes should take effect only if nobody else has modified the Task in the meantime.
+ /// The last modified time of the Task.
+ /// The URL of the Task, if the Task was successfully added.
+ /// The error encountered while attempting to add the Task.
+ /// A new instance for mocking.
+ public static BatchTaskAddResult BatchTaskAddResult(BatchTaskAddStatus status = default, string taskId = null, string eTag = null, DateTimeOffset? lastModified = null, string location = null, BatchError error = null)
+ {
+ return new BatchTaskAddResult(
+ status,
+ taskId,
+ eTag,
+ lastModified,
+ location,
+ error,
+ serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ /// An identifier for the error. Codes are invariant and are intended to be consumed programmatically.
+ /// A message describing the error, intended to be suitable for display in a user interface.
+ /// A collection of key-value pairs containing additional details about the error.
+ /// A new instance for mocking.
+ public static BatchError BatchError(string code = null, BatchErrorMessage message = null, IEnumerable<BatchErrorDetail> values = null)
+ {
+ values ??= new List<BatchErrorDetail>();
+
+ return new BatchError(code, message, values?.ToList(), serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ /// The language code of the error message.
+ /// The text of the message.
+ /// A new instance for mocking.
+ public static BatchErrorMessage BatchErrorMessage(string lang = null, string value = null)
+ {
+ return new BatchErrorMessage(lang, value, serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ /// An identifier specifying the meaning of the Value property.
+ /// The additional information included with the error response.
+ /// A new instance for mocking.
+ public static BatchErrorDetail BatchErrorDetail(string key = null, string value = null)
+ {
+ return new BatchErrorDetail(key, value, serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ /// The ID of the subtask.
+ /// Information about the Compute Node on which the subtask ran.
+ /// The time at which the subtask started running. If the subtask has been restarted or retried, this is the most recent time at which the subtask started running.
+ /// The time at which the subtask completed. This property is set only if the subtask is in the Completed state.
+ /// The exit code of the program specified on the subtask command line. This property is set only if the subtask is in the completed state. In general, the exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. However, if the Batch service terminates the subtask (due to timeout, or user termination via the API) you may see an operating system-defined exit code.
+ /// Information about the container under which the Task is executing. This property is set only if the Task runs in a container context.
+ /// Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure.
+ /// The current state of the subtask.
+ /// The time at which the subtask entered its current state.
+ /// The previous state of the subtask. This property is not set if the subtask is in its initial running state.
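These factory overloads exist so tests can fabricate service-populated models without a live Batch account. A sketch of composing a failed add-Task result follows; the factory class name ComputeBatchModelFactory and the BatchTaskAddStatus.ClientError value sit outside this excerpt and are assumptions.

// Hypothetical test setup using the factory methods above; the factory class
// name and the BatchTaskAddStatus value are assumed, not shown in this diff.
BatchTaskAddResult mockResult = ComputeBatchModelFactory.BatchTaskAddResult(
    status: BatchTaskAddStatus.ClientError, // assumed enum value
    taskId: "task-1",
    error: ComputeBatchModelFactory.BatchError(
        code: "TaskExists",
        message: ComputeBatchModelFactory.BatchErrorMessage(lang: "en-US", value: "The specified task already exists."),
        values: new[] { ComputeBatchModelFactory.BatchErrorDetail(key: "taskId", value: "task-1") }));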
+ /// The time at which the subtask entered its previous state. This property is not set if the subtask is in its initial running state. + /// The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. + /// A new instance for mocking. + public static BatchSubtask BatchSubtask(int? id = null, BatchNodeInfo nodeInfo = null, DateTimeOffset? startTime = null, DateTimeOffset? endTime = null, int? exitCode = null, BatchTaskContainerExecutionInfo containerInfo = null, BatchTaskFailureInfo failureInfo = null, BatchSubtaskState? state = null, DateTimeOffset? stateTransitionTime = null, BatchSubtaskState? previousState = null, DateTimeOffset? previousStateTransitionTime = null, BatchTaskExecutionResult? result = null) + { + return new BatchSubtask( + id, + nodeInfo, + startTime, + endTime, + exitCode, + containerInfo, + failureInfo, + state, + stateTransitionTime, + previousState, + previousStateTransitionTime, + result, + serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The file path. + /// The URL of the file. + /// Whether the object represents a directory. + /// The file properties. + /// A new instance for mocking. + public static BatchNodeFile BatchNodeFile(string name = null, string url = null, bool? isDirectory = null, FileProperties properties = null) + { + return new BatchNodeFile(name, url, isDirectory, properties, serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The file creation time. The creation time is not returned for files on Linux Compute Nodes. + /// The time at which the file was last modified. + /// The length of the file. + /// The content type of the file. + /// The file mode attribute in octal format. The file mode is returned only for files on Linux Compute Nodes. + /// A new instance for mocking. + public static FileProperties FileProperties(DateTimeOffset? creationTime = null, DateTimeOffset lastModified = default, long contentLength = default, string contentType = null, string fileMode = null) + { + return new FileProperties( + creationTime, + lastModified, + contentLength, + contentType, + fileMode, + serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The user name of the Account. + /// Whether the Account should be an administrator on the Compute Node. The default value is false. + /// The time at which the Account should expire. If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day. + /// The password of the Account. The password is required for Windows Compute Nodes (those created with 'virtualMachineConfiguration' using a Windows Image reference). For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. + /// The SSH public key that can be used for remote login to the Compute Node. The public key should be compatible with OpenSSH encoding and should be base 64 encoded. This property can be specified only for Linux Compute Nodes. If this is specified for a Windows Compute Node, then the Batch service rejects the request; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + /// A new instance for mocking. + public static BatchNodeUserCreateContent BatchNodeUserCreateContent(string name = null, bool? isAdmin = null, DateTimeOffset? 
expiryTime = null, string password = null, string sshPublicKey = null) + { + return new BatchNodeUserCreateContent( + name, + isAdmin, + expiryTime, + password, + sshPublicKey, + serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The ID of the Compute Node. Every Compute Node that is added to a Pool is assigned a unique ID. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the ID is reclaimed and could be reused for new Compute Nodes. + /// The URL of the Compute Node. + /// The current state of the Compute Node. The Spot/Low-priority Compute Node has been preempted. Tasks which were running on the Compute Node when it was preempted will be rescheduled when another Compute Node becomes available. + /// Whether the Compute Node is available for Task scheduling. + /// The time at which the Compute Node entered its current state. + /// The last time at which the Compute Node was started. This property may not be present if the Compute Node state is unusable. + /// The time at which this Compute Node was allocated to the Pool. This is the time when the Compute Node was initially allocated and doesn't change once set. It is not updated when the Compute Node is service healed or preempted. + /// The IP address that other Nodes can use to communicate with this Compute Node. Every Compute Node that is added to a Pool is assigned a unique IP address. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the IP address is reclaimed and could be reused for new Compute Nodes. + /// An identifier which can be passed when adding a Task to request that the Task be scheduled on this Compute Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere. + /// The size of the virtual machine hosting the Compute Node. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://docs.microsoft.com/azure/batch/batch-pool-vm-sizes). + /// The total number of Job Tasks completed on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. + /// The total number of currently running Job Tasks on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. + /// The total number of scheduling slots used by currently running Job Tasks on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. + /// The total number of Job Tasks which completed successfully (with exitCode 0) on the Compute Node. This includes Job Manager Tasks and normal Tasks, but not Job Preparation, Job Release or Start Tasks. + /// A list of Tasks whose state has recently changed. This property is present only if at least one Task has run on this Compute Node since it was assigned to the Pool. + /// The Task specified to run on the Compute Node as it joins the Pool. + /// Runtime information about the execution of the StartTask on the Compute Node. + /// The list of errors that are currently being encountered by the Compute Node. + /// Whether this Compute Node is a dedicated Compute Node. If false, the Compute Node is a Spot/Low-priority Compute Node. + /// The endpoint configuration for the Compute Node. 
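The BatchNode factory that this parameter list describes (its signature follows below) lets tests stand in for node state queries, and omitted collection parameters default to empty lists. A minimal sketch, assuming the factory class name ComputeBatchModelFactory and the BatchNodeState.Idle value, neither of which appears in this excerpt:

// Hypothetical: fabricate an idle dedicated node for scheduler tests.
BatchNode mockNode = ComputeBatchModelFactory.BatchNode(
    id: "tvmps_0001",
    state: BatchNodeState.Idle,   // assumed enum value
    vmSize: "standard_d2s_v3",
    isDedicated: true);           // recentTasks/errors default to empty lists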
+ /// Information about the Compute Node agent version and the time the Compute Node upgraded to a new version. + /// Info about the current state of the virtual machine. + /// A new instance for mocking. + public static BatchNode BatchNode(string id = null, string url = null, BatchNodeState? state = null, SchedulingState? schedulingState = null, DateTimeOffset? stateTransitionTime = null, DateTimeOffset? lastBootTime = null, DateTimeOffset? allocationTime = null, string ipAddress = null, string affinityId = null, string vmSize = null, int? totalTasksRun = null, int? runningTasksCount = null, int? runningTaskSlotsCount = null, int? totalTasksSucceeded = null, IEnumerable recentTasks = null, BatchStartTask startTask = null, BatchStartTaskInfo startTaskInfo = null, IEnumerable errors = null, bool? isDedicated = null, BatchNodeEndpointConfiguration endpointConfiguration = null, BatchNodeAgentInfo nodeAgentInfo = null, VirtualMachineInfo virtualMachineInfo = null) + { + recentTasks ??= new List(); + errors ??= new List(); + + return new BatchNode( + id, + url, + state, + schedulingState, + stateTransitionTime, + lastBootTime, + allocationTime, + ipAddress, + affinityId, + vmSize, + totalTasksRun, + runningTasksCount, + runningTaskSlotsCount, + totalTasksSucceeded, + recentTasks?.ToList(), + startTask, + startTaskInfo, + errors?.ToList(), + isDedicated, + endpointConfiguration, + nodeAgentInfo, + virtualMachineInfo, + serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The URL of the Task. + /// The ID of the Job to which the Task belongs. + /// The ID of the Task. + /// The ID of the subtask if the Task is a multi-instance Task. + /// The current state of the Task. + /// Information about the execution of the Task. + /// A new instance for mocking. + public static BatchTaskInfo BatchTaskInfo(string taskUrl = null, string jobId = null, string taskId = null, int? subtaskId = null, BatchTaskState taskState = default, BatchTaskExecutionInfo executionInfo = null) + { + return new BatchTaskInfo( + taskUrl, + jobId, + taskId, + subtaskId, + taskState, + executionInfo, + serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The state of the StartTask on the Compute Node. + /// The time at which the StartTask started running. This value is reset every time the Task is restarted or retried (that is, this is the most recent time at which the StartTask started running). + /// The time at which the StartTask stopped running. This is the end time of the most recent run of the StartTask, if that run has completed (even if that run failed and a retry is pending). This element is not present if the StartTask is currently running. + /// The exit code of the program specified on the StartTask command line. This property is set only if the StartTask is in the completed state. In general, the exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. However, if the Batch service terminates the StartTask (due to timeout, or user termination via the API) you may see an operating system-defined exit code. + /// Information about the container under which the Task is executing. This property is set only if the Task runs in a container context. + /// Information describing the Task failure, if any. 
This property is set only if the Task is in the completed state and encountered a failure. + /// The number of times the Task has been retried by the Batch service. Task application failures (non-zero exit code) are retried, pre-processing errors (the Task could not be run) and file upload errors are not retried. The Batch service will retry the Task up to the limit specified by the constraints. + /// The most recent time at which a retry of the Task started running. This element is present only if the Task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the Task has been restarted for reasons other than retry; for example, if the Compute Node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not. + /// The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. + /// A new instance for mocking. + public static BatchStartTaskInfo BatchStartTaskInfo(BatchStartTaskState state = default, DateTimeOffset startTime = default, DateTimeOffset? endTime = null, int? exitCode = null, BatchTaskContainerExecutionInfo containerInfo = null, BatchTaskFailureInfo failureInfo = null, int retryCount = default, DateTimeOffset? lastRetryTime = null, BatchTaskExecutionResult? result = null) + { + return new BatchStartTaskInfo( + state, + startTime, + endTime, + exitCode, + containerInfo, + failureInfo, + retryCount, + lastRetryTime, + result, + serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// An identifier for the Compute Node error. Codes are invariant and are intended to be consumed programmatically. + /// A message describing the Compute Node error, intended to be suitable for display in a user interface. + /// The list of additional error details related to the Compute Node error. + /// A new instance for mocking. + public static BatchNodeError BatchNodeError(string code = null, string message = null, IEnumerable errorDetails = null) + { + errorDetails ??= new List(); + + return new BatchNodeError(code, message, errorDetails?.ToList(), serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The list of inbound endpoints that are accessible on the Compute Node. + /// A new instance for mocking. + public static BatchNodeEndpointConfiguration BatchNodeEndpointConfiguration(IEnumerable inboundEndpoints = null) + { + inboundEndpoints ??= new List(); + + return new BatchNodeEndpointConfiguration(inboundEndpoints?.ToList(), serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The name of the endpoint. + /// The protocol of the endpoint. + /// The public IP address of the Compute Node. + /// The public fully qualified domain name for the Compute Node. + /// The public port number of the endpoint. + /// The backend port number of the endpoint. + /// A new instance for mocking. + public static InboundEndpoint InboundEndpoint(string name = null, InboundEndpointProtocol protocol = default, string publicIpAddress = null, string publicFQDN = null, int frontendPort = default, int backendPort = default) + { + return new InboundEndpoint( + name, + protocol, + publicIpAddress, + publicFQDN, + frontendPort, + backendPort, + serializedAdditionalRawData: null); + } + + /// Initializes a new instance of . + /// The version of the Batch Compute Node agent running on the Compute Node. 
This version number can be checked against the Compute Node agent release notes located at https://github.com/Azure/Batch/blob/master/changelogs/nodeagent/CHANGELOG.md.
+ /// The time when the Compute Node agent was updated on the Compute Node. This is the most recent time that the Compute Node agent was updated to a new version.
+ /// A new instance for mocking.
+ public static BatchNodeAgentInfo BatchNodeAgentInfo(string version = null, DateTimeOffset lastUpdateTime = default)
+ {
+ return new BatchNodeAgentInfo(version, lastUpdateTime, serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ /// The reference to the Azure Virtual Machine's Marketplace Image.
+ /// The resource ID of the Compute Node's current Virtual Machine Scale Set VM. Only defined if the Batch Account was created with its poolAllocationMode property set to 'UserSubscription'.
+ /// A new instance for mocking.
+ public static VirtualMachineInfo VirtualMachineInfo(ImageReference imageReference = null, string scaleSetVmResourceId = null)
+ {
+ return new VirtualMachineInfo(imageReference, scaleSetVmResourceId, serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ /// The IP address used for remote login to the Compute Node.
+ /// The port used for remote login to the Compute Node.
+ /// A new instance for mocking.
+ public static BatchNodeRemoteLoginSettings BatchNodeRemoteLoginSettings(string remoteLoginIpAddress = null, int remoteLoginPort = default)
+ {
+ return new BatchNodeRemoteLoginSettings(remoteLoginIpAddress, remoteLoginPort, serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ /// The URL of the container within Azure Blob Storage to which to upload the Batch Service log file(s). If a user assigned managed identity is not being used, the URL must include a Shared Access Signature (SAS) granting write permissions to the container. The SAS duration must allow enough time for the upload to finish. The start time for SAS is optional and recommended not to be specified.
+ /// The start of the time range from which to upload Batch Service log file(s). Any log file containing a log message in the time range will be uploaded. This means that the operation might retrieve more logs than have been requested since the entire log file is always uploaded, but the operation should not retrieve fewer logs than have been requested.
+ /// The end of the time range from which to upload Batch Service log file(s). Any log file containing a log message in the time range will be uploaded. This means that the operation might retrieve more logs than have been requested since the entire log file is always uploaded, but the operation should not retrieve fewer logs than have been requested. If omitted, the default is to upload all logs available after the startTime.
+ /// The reference to the user assigned identity to use to access Azure Blob Storage specified by containerUrl. The identity must have write access to the Azure Blob Storage container.
+ /// A new instance for mocking.
+ public static UploadBatchServiceLogsContent UploadBatchServiceLogsContent(string containerUrl = null, DateTimeOffset startTime = default, DateTimeOffset? endTime = null, BatchNodeIdentityReference identityReference = null)
+ {
+ return new UploadBatchServiceLogsContent(containerUrl, startTime, endTime, identityReference, serializedAdditionalRawData: null);
+ }
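Taken together, the two log-upload models map onto a single service call. A sketch of requesting a Batch service log upload for one Compute Node follows; the public UploadBatchServiceLogsContent constructor shape, the EndTime setter, the batchClient variable, the UploadNodeLogs method name, and the result property names (inferred from the factory parameters) are all assumptions not shown in this diff.

// Hypothetical usage sketch; the client method and property names are assumed.
var logsContent = new UploadBatchServiceLogsContent(
    containerUrl: "https://account.blob.core.windows.net/logs?sv=<sas-token>", // SAS granting write access
    startTime: DateTimeOffset.UtcNow.AddHours(-2))
{
    EndTime = DateTimeOffset.UtcNow // optional; omitting it uploads all logs after startTime
};
UploadBatchServiceLogsResult uploadResult = batchClient.UploadNodeLogs("pool-1", "tvmps_0001", logsContent);
Console.WriteLine($"{uploadResult.NumberOfFilesUploaded} file(s) under {uploadResult.VirtualDirectoryName}");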
+
+ /// Initializes a new instance of .
+ /// The virtual directory within Azure Blob Storage container to which the Batch Service log file(s) will be uploaded. The virtual directory name is part of the blob name for each log file uploaded, and it is built based on poolId, nodeId and a unique identifier.
+ /// The number of log files which will be uploaded.
+ /// A new instance for mocking.
+ public static UploadBatchServiceLogsResult UploadBatchServiceLogsResult(string virtualDirectoryName = null, int numberOfFilesUploaded = default)
+ {
+ return new UploadBatchServiceLogsResult(virtualDirectoryName, numberOfFilesUploaded, serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ /// The provisioning state of the virtual machine extension.
+ /// The virtual machine extension.
+ /// The vm extension instance view.
+ /// A new instance for mocking.
+ public static BatchNodeVMExtension BatchNodeVMExtension(string provisioningState = null, VMExtension vmExtension = null, VMExtensionInstanceView instanceView = null)
+ {
+ return new BatchNodeVMExtension(provisioningState, vmExtension, instanceView, serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ /// The name of the vm extension instance view.
+ /// The resource status information.
+ /// The resource sub-status information.
+ /// A new instance for mocking.
+ public static VMExtensionInstanceView VMExtensionInstanceView(string name = null, IEnumerable<InstanceViewStatus> statuses = null, IEnumerable<InstanceViewStatus> subStatuses = null)
+ {
+ statuses ??= new List<InstanceViewStatus>();
+ subStatuses ??= new List<InstanceViewStatus>();
+
+ return new VMExtensionInstanceView(name, statuses?.ToList(), subStatuses?.ToList(), serializedAdditionalRawData: null);
+ }
+
+ /// Initializes a new instance of .
+ /// The status code.
+ /// The localized label for the status.
+ /// Level code.
+ /// The detailed status message.
+ /// The time of the status.
+ /// A new instance for mocking.
+ public static InstanceViewStatus InstanceViewStatus(string code = null, string displayStatus = null, StatusLevelTypes? level = null, string message = null, DateTimeOffset? time = null)
+ {
+ return new InstanceViewStatus(
+ code,
+ displayStatus,
+ level,
+ message,
+ time,
+ serializedAdditionalRawData: null);
+ }
+ }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerConfiguration.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerConfiguration.Serialization.cs
new file mode 100644
index 0000000000000..d810361f3d8c2
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerConfiguration.Serialization.cs
@@ -0,0 +1,185 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.ClientModel.Primitives;
+using System.Collections.Generic;
+using System.Text.Json;
+using Azure.Core;
+
+namespace Azure.Compute.Batch
+{
+ public partial class ContainerConfiguration : IUtf8JsonSerializable, IJsonModel<ContainerConfiguration>
+ {
+ void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel<ContainerConfiguration>)this).Write(writer, ModelSerializationExtensions.WireOptions);
+
+ void IJsonModel<ContainerConfiguration>.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+ {
+ var format = options.Format == "W" ?
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ContainerConfiguration)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type.ToString()); + if (Optional.IsCollectionDefined(ContainerImageNames)) + { + writer.WritePropertyName("containerImageNames"u8); + writer.WriteStartArray(); + foreach (var item in ContainerImageNames) + { + writer.WriteStringValue(item); + } + writer.WriteEndArray(); + } + if (Optional.IsCollectionDefined(ContainerRegistries)) + { + writer.WritePropertyName("containerRegistries"u8); + writer.WriteStartArray(); + foreach (var item in ContainerRegistries) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ContainerConfiguration IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ContainerConfiguration)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeContainerConfiguration(document.RootElement, options); + } + + internal static ContainerConfiguration DeserializeContainerConfiguration(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + ContainerType type = default; + IList containerImageNames = default; + IList containerRegistries = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("type"u8)) + { + type = new ContainerType(property.Value.GetString()); + continue; + } + if (property.NameEquals("containerImageNames"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(item.GetString()); + } + containerImageNames = array; + continue; + } + if (property.NameEquals("containerRegistries"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(ContainerRegistryReference.DeserializeContainerRegistryReference(item, options)); + } + containerRegistries = array; + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new ContainerConfiguration(type, containerImageNames ?? new ChangeTrackingList(), containerRegistries ?? 
new ChangeTrackingList(), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ContainerConfiguration)} does not support writing '{options.Format}' format."); + } + } + + ContainerConfiguration IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeContainerConfiguration(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ContainerConfiguration)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static ContainerConfiguration FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeContainerConfiguration(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerConfiguration.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerConfiguration.cs new file mode 100644 index 0000000000000..dbcc48aa2857b --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerConfiguration.cs @@ -0,0 +1,82 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// The configuration for container-enabled Pools. + public partial class ContainerConfiguration + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The container technology to be used. + public ContainerConfiguration(ContainerType type) + { + Type = type; + ContainerImageNames = new ChangeTrackingList(); + ContainerRegistries = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// The container technology to be used. + /// The collection of container Image names. This is the full Image reference, as would be specified to "docker pull". 
An Image will be sourced from the default Docker registry unless the Image is fully qualified with an alternative registry. + /// Additional private registries from which containers can be pulled. If any Images must be downloaded from a private registry which requires credentials, then those credentials must be provided here. + /// Keeps track of any properties unknown to the library. + internal ContainerConfiguration(ContainerType type, IList containerImageNames, IList containerRegistries, IDictionary serializedAdditionalRawData) + { + Type = type; + ContainerImageNames = containerImageNames; + ContainerRegistries = containerRegistries; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ContainerConfiguration() + { + } + + /// The container technology to be used. + public ContainerType Type { get; set; } + /// The collection of container Image names. This is the full Image reference, as would be specified to "docker pull". An Image will be sourced from the default Docker registry unless the Image is fully qualified with an alternative registry. + public IList ContainerImageNames { get; } + /// Additional private registries from which containers can be pulled. If any Images must be downloaded from a private registry which requires credentials, then those credentials must be provided here. + public IList ContainerRegistries { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerRegistryReference.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerRegistryReference.Serialization.cs new file mode 100644 index 0000000000000..ccca2db026d5c --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerRegistryReference.Serialization.cs @@ -0,0 +1,175 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class ContainerRegistryReference : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ContainerRegistryReference)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(Username)) + { + writer.WritePropertyName("username"u8); + writer.WriteStringValue(Username); + } + if (Optional.IsDefined(Password)) + { + writer.WritePropertyName("password"u8); + writer.WriteStringValue(Password); + } + if (Optional.IsDefined(RegistryServer)) + { + writer.WritePropertyName("registryServer"u8); + writer.WriteStringValue(RegistryServer); + } + if (Optional.IsDefined(IdentityReference)) + { + writer.WritePropertyName("identityReference"u8); + writer.WriteObjectValue(IdentityReference, options); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ContainerRegistryReference IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ContainerRegistryReference)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeContainerRegistryReference(document.RootElement, options); + } + + internal static ContainerRegistryReference DeserializeContainerRegistryReference(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string username = default; + string password = default; + string registryServer = default; + BatchNodeIdentityReference identityReference = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("username"u8)) + { + username = property.Value.GetString(); + continue; + } + if (property.NameEquals("password"u8)) + { + password = property.Value.GetString(); + continue; + } + if (property.NameEquals("registryServer"u8)) + { + registryServer = property.Value.GetString(); + continue; + } + if (property.NameEquals("identityReference"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + identityReference = BatchNodeIdentityReference.DeserializeBatchNodeIdentityReference(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new ContainerRegistryReference(username, password, registryServer, identityReference, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ContainerRegistryReference)} does not support writing '{options.Format}' format."); + } + } + + ContainerRegistryReference IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeContainerRegistryReference(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ContainerRegistryReference)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static ContainerRegistryReference FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeContainerRegistryReference(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerRegistryReference.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerRegistryReference.cs new file mode 100644 index 0000000000000..3330be8fa2fdb --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerRegistryReference.cs @@ -0,0 +1,77 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// A private container registry. + public partial class ContainerRegistryReference + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public ContainerRegistryReference() + { + } + + /// Initializes a new instance of . + /// The user name to log into the registry server. + /// The password to log into the registry server. + /// The registry URL. If omitted, the default is "docker.io". + /// The reference to the user assigned identity to use to access an Azure Container Registry instead of username and password. + /// Keeps track of any properties unknown to the library. 
+ internal ContainerRegistryReference(string username, string password, string registryServer, BatchNodeIdentityReference identityReference, IDictionary serializedAdditionalRawData) + { + Username = username; + Password = password; + RegistryServer = registryServer; + IdentityReference = identityReference; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The user name to log into the registry server. + public string Username { get; set; } + /// The password to log into the registry server. + public string Password { get; set; } + /// The registry URL. If omitted, the default is "docker.io". + public string RegistryServer { get; set; } + /// The reference to the user assigned identity to use to access an Azure Container Registry instead of username and password. + public BatchNodeIdentityReference IdentityReference { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerType.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerType.cs new file mode 100644 index 0000000000000..a08890aca76f5 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerType.cs @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// ContainerType enums. + public readonly partial struct ContainerType : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ContainerType(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string DockerCompatibleValue = "dockerCompatible"; + private const string CriCompatibleValue = "criCompatible"; + + /// A Docker compatible container technology will be used to launch the containers. + public static ContainerType DockerCompatible { get; } = new ContainerType(DockerCompatibleValue); + /// A CRI based technology will be used to launch the containers. + public static ContainerType CriCompatible { get; } = new ContainerType(CriCompatibleValue); + /// Determines if two values are the same. + public static bool operator ==(ContainerType left, ContainerType right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(ContainerType left, ContainerType right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ContainerType(string value) => new ContainerType(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ContainerType other && Equals(other); + /// + public bool Equals(ContainerType other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerWorkingDirectory.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerWorkingDirectory.cs new file mode 100644 index 0000000000000..36a6e531931c1 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerWorkingDirectory.cs @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
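Because ContainerRegistryReference exposes a public parameterless constructor with settable properties, while ContainerConfiguration exposes read-only lists seeded by its constructor, wiring the two models above together looks roughly like the following sketch (the registry values are illustrative only):

// Sketch: a Docker-compatible container setup with one private registry.
var containerConfig = new ContainerConfiguration(ContainerType.DockerCompatible);
containerConfig.ContainerImageNames.Add("contoso.azurecr.io/batch-task:1.0"); // full "docker pull" reference
containerConfig.ContainerRegistries.Add(new ContainerRegistryReference
{
    RegistryServer = "contoso.azurecr.io",
    Username = "contoso",            // or set IdentityReference to use a managed identity instead
    Password = "<registry-password>"
});
// ContainerType is an extensible enum: the implicit string conversion means
// unknown future service values still round-trip, and equality is case-insensitive.
ContainerType parsed = "dockerCompatible";
bool same = parsed == ContainerType.DockerCompatible; // true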
+ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// ContainerWorkingDirectory enums. + public readonly partial struct ContainerWorkingDirectory : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ContainerWorkingDirectory(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string TaskWorkingDirectoryValue = "taskWorkingDirectory"; + private const string ContainerImageDefaultValue = "containerImageDefault"; + + /// Use the standard Batch service Task working directory, which will contain the Task Resource Files populated by Batch. + public static ContainerWorkingDirectory TaskWorkingDirectory { get; } = new ContainerWorkingDirectory(TaskWorkingDirectoryValue); + /// Use the working directory defined in the container Image. Beware that this directory will not contain the Resource Files downloaded by Batch. + public static ContainerWorkingDirectory ContainerImageDefault { get; } = new ContainerWorkingDirectory(ContainerImageDefaultValue); + /// Determines if two values are the same. + public static bool operator ==(ContainerWorkingDirectory left, ContainerWorkingDirectory right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(ContainerWorkingDirectory left, ContainerWorkingDirectory right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ContainerWorkingDirectory(string value) => new ContainerWorkingDirectory(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ContainerWorkingDirectory other && Equals(other); + /// + public bool Equals(ContainerWorkingDirectory other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/DataDisk.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/DataDisk.Serialization.cs new file mode 100644 index 0000000000000..d6cbd406d7a78 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/DataDisk.Serialization.cs @@ -0,0 +1,173 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class DataDisk : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(DataDisk)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("lun"u8); + writer.WriteNumberValue(LogicalUnitNumber); + if (Optional.IsDefined(Caching)) + { + writer.WritePropertyName("caching"u8); + writer.WriteStringValue(Caching.Value.ToString()); + } + writer.WritePropertyName("diskSizeGB"u8); + writer.WriteNumberValue(DiskSizeGb); + if (Optional.IsDefined(StorageAccountType)) + { + writer.WritePropertyName("storageAccountType"u8); + writer.WriteStringValue(StorageAccountType.Value.ToString()); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + DataDisk IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(DataDisk)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeDataDisk(document.RootElement, options); + } + + internal static DataDisk DeserializeDataDisk(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + int lun = default; + CachingType? caching = default; + int diskSizeGB = default; + StorageAccountType? storageAccountType = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("lun"u8)) + { + lun = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("caching"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + caching = new CachingType(property.Value.GetString()); + continue; + } + if (property.NameEquals("diskSizeGB"u8)) + { + diskSizeGB = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("storageAccountType"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + storageAccountType = new StorageAccountType(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new DataDisk(lun, caching, diskSizeGB, storageAccountType, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(DataDisk)} does not support writing '{options.Format}' format."); + } + } + + DataDisk IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeDataDisk(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(DataDisk)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static DataDisk FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeDataDisk(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/DataDisk.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/DataDisk.cs new file mode 100644 index 0000000000000..f18f0f94302ee --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/DataDisk.cs @@ -0,0 +1,90 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// + /// Settings which will be used by the data disks associated to Compute Nodes in + /// the Pool. When using attached data disks, you need to mount and format the + /// disks from within a VM to use them. + /// + public partial class DataDisk + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The logical unit number. The logicalUnitNumber is used to uniquely identify each data disk. If attaching multiple disks, each should have a distinct logicalUnitNumber. The value must be between 0 and 63, inclusive. + /// The initial disk size in gigabytes. + public DataDisk(int logicalUnitNumber, int diskSizeGb) + { + LogicalUnitNumber = logicalUnitNumber; + DiskSizeGb = diskSizeGb; + } + + /// Initializes a new instance of . + /// The logical unit number. The logicalUnitNumber is used to uniquely identify each data disk. If attaching multiple disks, each should have a distinct logicalUnitNumber. 
The value must be between 0 and 63, inclusive. + /// The type of caching to be enabled for the data disks. The default value for caching is readwrite. For information about the caching options see: https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. + /// The initial disk size in gigabytes. + /// The storage Account type to be used for the data disk. If omitted, the default is "standard_lrs". + /// Keeps track of any properties unknown to the library. + internal DataDisk(int logicalUnitNumber, CachingType? caching, int diskSizeGb, StorageAccountType? storageAccountType, IDictionary serializedAdditionalRawData) + { + LogicalUnitNumber = logicalUnitNumber; + Caching = caching; + DiskSizeGb = diskSizeGb; + StorageAccountType = storageAccountType; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal DataDisk() + { + } + + /// The logical unit number. The logicalUnitNumber is used to uniquely identify each data disk. If attaching multiple disks, each should have a distinct logicalUnitNumber. The value must be between 0 and 63, inclusive. + public int LogicalUnitNumber { get; set; } + /// The type of caching to be enabled for the data disks. The default value for caching is readwrite. For information about the caching options see: https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. + public CachingType? Caching { get; set; } + /// The initial disk size in gigabytes. + public int DiskSizeGb { get; set; } + /// The storage Account type to be used for the data disk. If omitted, the default is "standard_lrs". + public StorageAccountType? StorageAccountType { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/DependencyAction.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/DependencyAction.cs new file mode 100644 index 0000000000000..42c5bbb1376be --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/DependencyAction.cs @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// DependencyAction enums. + public readonly partial struct DependencyAction : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public DependencyAction(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string SatisfyValue = "satisfy"; + private const string BlockValue = "block"; + + /// Satisfy tasks waiting on this task; once all dependencies are satisfied, the task will be scheduled to run. + public static DependencyAction Satisfy { get; } = new DependencyAction(SatisfyValue); + /// Blocks tasks waiting on this task, preventing them from being scheduled. + public static DependencyAction Block { get; } = new DependencyAction(BlockValue); + /// Determines if two values are the same. + public static bool operator ==(DependencyAction left, DependencyAction right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(DependencyAction left, DependencyAction right) => !left.Equals(right); + /// Converts a string to a . 
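As a usage note for the DataDisk model defined above: each attached disk needs a distinct LUN between 0 and 63, and caching and storage type fall back to the documented defaults (readwrite and "standard_lrs") when left unset. A sketch follows, assuming CachingType.ReadOnly and StorageAccountType.PremiumLrs exist as values; neither enum's members appear in this excerpt.

// Illustrative only; the enum member names below are assumptions.
var dataDisks = new List<DataDisk>
{
    new DataDisk(logicalUnitNumber: 0, diskSizeGb: 128), // defaults: readwrite caching, "standard_lrs"
    new DataDisk(logicalUnitNumber: 1, diskSizeGb: 256)
    {
        Caching = CachingType.ReadOnly,
        StorageAccountType = StorageAccountType.PremiumLrs
    }
};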
+ public static implicit operator DependencyAction(string value) => new DependencyAction(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is DependencyAction other && Equals(other); + /// + public bool Equals(DependencyAction other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/DiffDiskPlacement.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/DiffDiskPlacement.cs new file mode 100644 index 0000000000000..8325caf8b88ae --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/DiffDiskPlacement.cs @@ -0,0 +1,48 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// AccessDiffDiskPlacementScope enums. + public readonly partial struct DiffDiskPlacement : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public DiffDiskPlacement(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string CacheDiskValue = "cachedisk"; + + /// The Ephemeral OS Disk is stored on the VM cache. + public static DiffDiskPlacement CacheDisk { get; } = new DiffDiskPlacement(CacheDiskValue); + /// Determines if two values are the same. + public static bool operator ==(DiffDiskPlacement left, DiffDiskPlacement right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(DiffDiskPlacement left, DiffDiskPlacement right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator DiffDiskPlacement(string value) => new DiffDiskPlacement(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is DiffDiskPlacement other && Equals(other); + /// + public bool Equals(DiffDiskPlacement other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/DiffDiskSettings.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/DiffDiskSettings.Serialization.cs new file mode 100644 index 0000000000000..193f5964b6bd4 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/DiffDiskSettings.Serialization.cs @@ -0,0 +1,142 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class DiffDiskSettings : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(DiffDiskSettings)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(Placement)) + { + writer.WritePropertyName("placement"u8); + writer.WriteStringValue(Placement.Value.ToString()); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + DiffDiskSettings IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(DiffDiskSettings)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeDiffDiskSettings(document.RootElement, options); + } + + internal static DiffDiskSettings DeserializeDiffDiskSettings(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + DiffDiskPlacement? placement = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("placement"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + placement = new DiffDiskPlacement(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new DiffDiskSettings(placement, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(DiffDiskSettings)} does not support writing '{options.Format}' format."); + } + } + + DiffDiskSettings IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeDiffDiskSettings(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(DiffDiskSettings)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. 
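+        // Sketch (not generated code, assumes standard System.ClientModel usage):
+        // the IJsonModel/IPersistableModel members above plug into ModelReaderWriter,
+        // so a model can be round-tripped through its JSON ("J") format.
+        private static void RoundTripSketch()
+        {
+            var settings = new DiffDiskSettings { Placement = DiffDiskPlacement.CacheDisk };
+            BinaryData json = ModelReaderWriter.Write(settings);                       // writes "placement": "cachedisk"
+            DiffDiskSettings roundTripped = ModelReaderWriter.Read<DiffDiskSettings>(json);
+            // Properties unknown to the library survive the round trip in
+            // _serializedAdditionalRawData rather than being dropped.
+        }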
+ internal static DiffDiskSettings FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeDiffDiskSettings(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/DiffDiskSettings.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/DiffDiskSettings.cs new file mode 100644 index 0000000000000..03ec91b162edb --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/DiffDiskSettings.cs @@ -0,0 +1,68 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// + /// Specifies the ephemeral Disk Settings for the operating system disk used by the + /// compute node (VM). + /// + public partial class DiffDiskSettings + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public DiffDiskSettings() + { + } + + /// Initializes a new instance of . + /// Specifies the ephemeral disk placement for operating system disk for all VMs in the pool. This property can be used by user in the request to choose the location e.g., cache disk space for Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VMs at https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. + /// Keeps track of any properties unknown to the library. + internal DiffDiskSettings(DiffDiskPlacement? placement, IDictionary serializedAdditionalRawData) + { + Placement = placement; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Specifies the ephemeral disk placement for operating system disk for all VMs in the pool. This property can be used by user in the request to choose the location e.g., cache disk space for Ephemeral OS disk provisioning. For more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at https://docs.microsoft.com/en-us/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VMs at https://docs.microsoft.com/en-us/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. + public DiffDiskPlacement? 
Placement { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/DisableBatchJobOption.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/DisableBatchJobOption.cs new file mode 100644 index 0000000000000..4ad3fb7713f03 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/DisableBatchJobOption.cs @@ -0,0 +1,54 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// DisableBatchJobOption enums. + public readonly partial struct DisableBatchJobOption : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public DisableBatchJobOption(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string RequeueValue = "requeue"; + private const string TerminateValue = "terminate"; + private const string WaitValue = "wait"; + + /// Terminate running Tasks and requeue them. The Tasks will run again when the Job is enabled. + public static DisableBatchJobOption Requeue { get; } = new DisableBatchJobOption(RequeueValue); + /// Terminate running Tasks. The Tasks will be completed with failureInfo indicating that they were terminated, and will not run again. + public static DisableBatchJobOption Terminate { get; } = new DisableBatchJobOption(TerminateValue); + /// Allow currently running Tasks to complete. + public static DisableBatchJobOption Wait { get; } = new DisableBatchJobOption(WaitValue); + /// Determines if two values are the same. + public static bool operator ==(DisableBatchJobOption left, DisableBatchJobOption right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(DisableBatchJobOption left, DisableBatchJobOption right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator DisableBatchJobOption(string value) => new DisableBatchJobOption(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is DisableBatchJobOption other && Equals(other); + /// + public bool Equals(DisableBatchJobOption other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/DiskEncryptionConfiguration.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/DiskEncryptionConfiguration.Serialization.cs new file mode 100644 index 0000000000000..1719ed41f1880 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/DiskEncryptionConfiguration.Serialization.cs @@ -0,0 +1,152 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
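+// Aside on the DisableBatchJobOption enum above (illustrative comments, not
+// generated code): the value chosen decides the fate of tasks that are still
+// running when a job is disabled:
+//   requeue   - terminate running tasks; they run again once the job is re-enabled
+//   terminate - terminate running tasks for good (failureInfo marks them terminated)
+//   wait      - allow running tasks to complete
+// Like the other extensible enums in this PR, comparisons are case-insensitive:
+//   DisableBatchJobOption option = "WAIT";
+//   bool willWait = option == DisableBatchJobOption.Wait;   // true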
+ +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class DiskEncryptionConfiguration : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(DiskEncryptionConfiguration)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsCollectionDefined(Targets)) + { + writer.WritePropertyName("targets"u8); + writer.WriteStartArray(); + foreach (var item in Targets) + { + writer.WriteStringValue(item.ToString()); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + DiskEncryptionConfiguration IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(DiskEncryptionConfiguration)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeDiskEncryptionConfiguration(document.RootElement, options); + } + + internal static DiskEncryptionConfiguration DeserializeDiskEncryptionConfiguration(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + IList targets = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("targets"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(new DiskEncryptionTarget(item.GetString())); + } + targets = array; + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new DiskEncryptionConfiguration(targets ?? new ChangeTrackingList(), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(DiskEncryptionConfiguration)} does not support writing '{options.Format}' format."); + } + } + + DiskEncryptionConfiguration IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeDiskEncryptionConfiguration(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(DiskEncryptionConfiguration)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static DiskEncryptionConfiguration FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeDiskEncryptionConfiguration(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/DiskEncryptionConfiguration.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/DiskEncryptionConfiguration.cs new file mode 100644 index 0000000000000..1a8f6a27f6ca8 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/DiskEncryptionConfiguration.cs @@ -0,0 +1,70 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// + /// The disk encryption configuration applied on compute nodes in the pool. + /// Disk encryption configuration is not supported on Linux pool created with + /// Azure Compute Gallery Image. + /// + public partial class DiskEncryptionConfiguration + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public DiskEncryptionConfiguration() + { + Targets = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// The list of disk targets Batch Service will encrypt on the compute node. If omitted, no disks on the compute nodes in the pool will be encrypted. On Linux pool, only "TemporaryDisk" is supported; on Windows pool, "OsDisk" and "TemporaryDisk" must be specified. + /// Keeps track of any properties unknown to the library. 
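+        // Usage sketch (not generated code), based on this class's doc comments:
+        // the parameterless constructor initializes Targets to a ChangeTrackingList
+        // and the property is get-only, so callers add encryption targets in place.
+        private static void UsageSketch()
+        {
+            var diskEncryption = new DiskEncryptionConfiguration();
+            diskEncryption.Targets.Add(DiskEncryptionTarget.TemporaryDisk); // Linux pools: only "TemporaryDisk" is supported
+            diskEncryption.Targets.Add(DiskEncryptionTarget.OsDisk);        // Windows pools: both targets must be specified
+        }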
+ internal DiskEncryptionConfiguration(IList targets, IDictionary serializedAdditionalRawData) + { + Targets = targets; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The list of disk targets Batch Service will encrypt on the compute node. If omitted, no disks on the compute nodes in the pool will be encrypted. On Linux pool, only "TemporaryDisk" is supported; on Windows pool, "OsDisk" and "TemporaryDisk" must be specified. + public IList Targets { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/DiskEncryptionTarget.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/DiskEncryptionTarget.cs new file mode 100644 index 0000000000000..2a5a267ad6b1d --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/DiskEncryptionTarget.cs @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// DiskEncryptionTarget enums. + public readonly partial struct DiskEncryptionTarget : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public DiskEncryptionTarget(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string OsDiskValue = "osdisk"; + private const string TemporaryDiskValue = "temporarydisk"; + + /// The OS Disk on the compute node is encrypted. + public static DiskEncryptionTarget OsDisk { get; } = new DiskEncryptionTarget(OsDiskValue); + /// The temporary disk on the compute node is encrypted. On Linux this encryption applies to other partitions (such as those on mounted data disks) when encryption occurs at boot time. + public static DiskEncryptionTarget TemporaryDisk { get; } = new DiskEncryptionTarget(TemporaryDiskValue); + /// Determines if two values are the same. + public static bool operator ==(DiskEncryptionTarget left, DiskEncryptionTarget right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(DiskEncryptionTarget left, DiskEncryptionTarget right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator DiskEncryptionTarget(string value) => new DiskEncryptionTarget(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is DiskEncryptionTarget other && Equals(other); + /// + public bool Equals(DiskEncryptionTarget other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/Docs/BatchClient.xml b/sdk/batch/Azure.Compute.Batch/src/Generated/Docs/BatchClient.xml new file mode 100644 index 0000000000000..d206035808744 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/Docs/BatchClient.xml @@ -0,0 +1,19681 @@ + + + + + +This sample shows how to call GetApplicationAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetApplicationAsync(""); +]]> +This sample shows how to call GetApplicationAsync with all parameters. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetApplicationAsync("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); +]]> + + + +This sample shows how to call GetApplication. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.GetApplication(""); +]]> +This sample shows how to call GetApplication with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.GetApplication("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); +]]> + + + +This sample shows how to call GetApplicationAsync and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetApplicationAsync("", null, null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("id").ToString()); +Console.WriteLine(result.GetProperty("displayName").ToString()); +Console.WriteLine(result.GetProperty("versions")[0].ToString()); +]]> +This sample shows how to call GetApplicationAsync with all parameters and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetApplicationAsync("", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("id").ToString()); +Console.WriteLine(result.GetProperty("displayName").ToString()); +Console.WriteLine(result.GetProperty("versions")[0].ToString()); +]]> + + + +This sample shows how to call GetApplication and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.GetApplication("", null, null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("id").ToString()); +Console.WriteLine(result.GetProperty("displayName").ToString()); +Console.WriteLine(result.GetProperty("versions")[0].ToString()); +]]> +This sample shows how to call GetApplication with all parameters and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.GetApplication("", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("id").ToString()); +Console.WriteLine(result.GetProperty("displayName").ToString()); +Console.WriteLine(result.GetProperty("versions")[0].ToString()); +]]> + + + +This sample shows how to call CreatePoolAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchPoolCreateContent pool = new BatchPoolCreateContent("", ""); +Response response = await client.CreatePoolAsync(pool); +]]> +This sample shows how to call CreatePoolAsync with all parameters. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchPoolCreateContent pool = new BatchPoolCreateContent("", "") +{ + DisplayName = "", + VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + { + Publisher = "", + Offer = "", + Sku = "", + Version = "", + VirtualMachineImageId = "", + }, "") + { + WindowsConfiguration = new WindowsConfiguration + { + EnableAutomaticUpdates = true, + }, + DataDisks = {new DataDisk(1234, 1234) + { + Caching = CachingType.None, + StorageAccountType = StorageAccountType.StandardLRS, + }}, + LicenseType = "", + ContainerConfiguration = new ContainerConfiguration(ContainerType.DockerCompatible) + { + ContainerImageNames = { "" }, + ContainerRegistries = {new ContainerRegistryReference + { + Username = "", + Password = "", + RegistryServer = "", + IdentityReference = new BatchNodeIdentityReference + { + ResourceId = "", + }, + }}, + }, + DiskEncryptionConfiguration = new DiskEncryptionConfiguration + { + Targets = { DiskEncryptionTarget.OsDisk }, + }, + NodePlacementConfiguration = new BatchNodePlacementConfiguration + { + Policy = BatchNodePlacementPolicyType.Regional, + }, + Extensions = {new VMExtension("", "", "") + { + TypeHandlerVersion = "", + AutoUpgradeMinorVersion = true, + EnableAutomaticUpgrade = true, + Settings = + { + ["key"] = "" + }, + ProtectedSettings = + { + ["key"] = "" + }, + ProvisionAfterExtensions = {""}, + }}, + OsDisk = new OSDisk + { + EphemeralOSDiskSettings = new DiffDiskSettings + { + Placement = DiffDiskPlacement.CacheDisk, + }, + Caching = CachingType.None, + DiskSizeGB = 1234, + ManagedDisk = new ManagedDisk(StorageAccountType.StandardLRS), + WriteAcceleratorEnabled = true, + }, + SecurityProfile = new SecurityProfile(true, SecurityTypes.TrustedLaunch, new UefiSettings + { + SecureBootEnabled = true, + VTpmEnabled = true, + }), + ServiceArtifactReference = new ServiceArtifactReference(""), + }, + ResizeTimeout = XmlConvert.ToTimeSpan("PT1H23M45S"), + ResourceTags = + { + ["key"] = "" + }, + TargetDedicatedNodes = 1234, + TargetLowPriorityNodes = 1234, + EnableAutoScale = true, + AutoScaleFormula = "", + AutoScaleEvaluationInterval = XmlConvert.ToTimeSpan("PT1H23M45S"), + EnableInterNodeCommunication = true, + NetworkConfiguration = new NetworkConfiguration + { + SubnetId = "", + DynamicVNetAssignmentScope = DynamicVNetAssignmentScope.None, + EndpointConfiguration = new BatchPoolEndpointConfiguration(new InboundNatPool[] + { + new InboundNatPool("", InboundEndpointProtocol.Tcp, 1234, 1234, 1234) + { + NetworkSecurityGroupRules = {new NetworkSecurityGroupRule(1234, NetworkSecurityGroupRuleAccess.Allow, "") + { + SourcePortRanges = {""}, + }}, + } + }), + PublicIpAddressConfiguration = new PublicIpAddressConfiguration + { + IpAddressProvisioningType = IpAddressProvisioningType.BatchManaged, + IpAddressIds = { "" }, + }, + EnableAcceleratedNetworking = true, + }, + StartTask = new BatchStartTask("") + { + ContainerSettings = new BatchTaskContainerSettings("") + { + ContainerRunOptions = "", + Registry = default, + WorkingDirectory = ContainerWorkingDirectory.TaskWorkingDirectory, + }, + ResourceFiles = {new ResourceFile + { + AutoStorageContainerName = "", + StorageContainerUrl = "", + HttpUrl = "", + BlobPrefix = "", + FilePath = "", + FileMode = "", + IdentityReference = default, + }}, + EnvironmentSettings = {new EnvironmentSetting("") + { + Value = "", + }}, + UserIdentity = new UserIdentity + { + Username = "", + 
AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, + MaxTaskRetryCount = 1234, + WaitForSuccess = true, + }, + ApplicationPackageReferences = {new BatchApplicationPackageReference("") + { + Version = "", + }}, + TaskSlotsPerNode = 1234, + TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), + UserAccounts = {new UserAccount("", "") + { + ElevationLevel = ElevationLevel.NonAdmin, + LinuxUserConfiguration = new LinuxUserConfiguration + { + Uid = 1234, + Gid = 1234, + SshPrivateKey = "", + }, + WindowsUserConfiguration = new WindowsUserConfiguration + { + LoginMode = LoginMode.Batch, + }, + }}, + Metadata = { new MetadataItem("", "") }, + MountConfiguration = {new MountConfiguration + { + AzureBlobFileSystemConfiguration = new AzureBlobFileSystemConfiguration("", "", "") + { + AccountKey = "", + SasKey = "", + BlobfuseOptions = "", + IdentityReference = default, + }, + NfsMountConfiguration = new NfsMountConfiguration("", "") + { + MountOptions = "", + }, + CifsMountConfiguration = new CifsMountConfiguration("", "", "", "") + { + MountOptions = "", + }, + AzureFileShareConfiguration = new AzureFileShareConfiguration("", "", "", "") + { + MountOptions = "", + }, + }}, + TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, + UpgradePolicy = new UpgradePolicy(UpgradeMode.Automatic) + { + AutomaticOsUpgradePolicy = new AutomaticOsUpgradePolicy + { + DisableAutomaticRollback = true, + EnableAutomaticOsUpgrade = true, + UseRollingUpgradePolicy = true, + OsRollingUpgradeDeferral = true, + }, + RollingUpgradePolicy = new RollingUpgradePolicy + { + EnableCrossZoneUpgrade = true, + MaxBatchInstancePercent = 1234, + MaxUnhealthyInstancePercent = 1234, + MaxUnhealthyUpgradedInstancePercent = 1234, + PauseTimeBetweenBatches = XmlConvert.ToTimeSpan("PT1H23M45S"), + PrioritizeUnhealthyInstances = true, + RollbackFailedInstancesOnPolicyBreach = true, + }, + }, +}; +Response response = await client.CreatePoolAsync(pool, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); +]]> + + + +This sample shows how to call CreatePool. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchPoolCreateContent pool = new BatchPoolCreateContent("", ""); +Response response = client.CreatePool(pool); +]]> +This sample shows how to call CreatePool with all parameters. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchPoolCreateContent pool = new BatchPoolCreateContent("", "") +{ + DisplayName = "", + VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + { + Publisher = "", + Offer = "", + Sku = "", + Version = "", + VirtualMachineImageId = "", + }, "") + { + WindowsConfiguration = new WindowsConfiguration + { + EnableAutomaticUpdates = true, + }, + DataDisks = {new DataDisk(1234, 1234) + { + Caching = CachingType.None, + StorageAccountType = StorageAccountType.StandardLRS, + }}, + LicenseType = "", + ContainerConfiguration = new ContainerConfiguration(ContainerType.DockerCompatible) + { + ContainerImageNames = { "" }, + ContainerRegistries = {new ContainerRegistryReference + { + Username = "", + Password = "", + RegistryServer = "", + IdentityReference = new BatchNodeIdentityReference + { + ResourceId = "", + }, + }}, + }, + DiskEncryptionConfiguration = new DiskEncryptionConfiguration + { + Targets = { DiskEncryptionTarget.OsDisk }, + }, + NodePlacementConfiguration = new BatchNodePlacementConfiguration + { + Policy = BatchNodePlacementPolicyType.Regional, + }, + Extensions = {new VMExtension("", "", "") + { + TypeHandlerVersion = "", + AutoUpgradeMinorVersion = true, + EnableAutomaticUpgrade = true, + Settings = + { + ["key"] = "" + }, + ProtectedSettings = + { + ["key"] = "" + }, + ProvisionAfterExtensions = {""}, + }}, + OsDisk = new OSDisk + { + EphemeralOSDiskSettings = new DiffDiskSettings + { + Placement = DiffDiskPlacement.CacheDisk, + }, + Caching = CachingType.None, + DiskSizeGB = 1234, + ManagedDisk = new ManagedDisk(StorageAccountType.StandardLRS), + WriteAcceleratorEnabled = true, + }, + SecurityProfile = new SecurityProfile(true, SecurityTypes.TrustedLaunch, new UefiSettings + { + SecureBootEnabled = true, + VTpmEnabled = true, + }), + ServiceArtifactReference = new ServiceArtifactReference(""), + }, + ResizeTimeout = XmlConvert.ToTimeSpan("PT1H23M45S"), + ResourceTags = + { + ["key"] = "" + }, + TargetDedicatedNodes = 1234, + TargetLowPriorityNodes = 1234, + EnableAutoScale = true, + AutoScaleFormula = "", + AutoScaleEvaluationInterval = XmlConvert.ToTimeSpan("PT1H23M45S"), + EnableInterNodeCommunication = true, + NetworkConfiguration = new NetworkConfiguration + { + SubnetId = "", + DynamicVNetAssignmentScope = DynamicVNetAssignmentScope.None, + EndpointConfiguration = new BatchPoolEndpointConfiguration(new InboundNatPool[] + { + new InboundNatPool("", InboundEndpointProtocol.Tcp, 1234, 1234, 1234) + { + NetworkSecurityGroupRules = {new NetworkSecurityGroupRule(1234, NetworkSecurityGroupRuleAccess.Allow, "") + { + SourcePortRanges = {""}, + }}, + } + }), + PublicIpAddressConfiguration = new PublicIpAddressConfiguration + { + IpAddressProvisioningType = IpAddressProvisioningType.BatchManaged, + IpAddressIds = { "" }, + }, + EnableAcceleratedNetworking = true, + }, + StartTask = new BatchStartTask("") + { + ContainerSettings = new BatchTaskContainerSettings("") + { + ContainerRunOptions = "", + Registry = default, + WorkingDirectory = ContainerWorkingDirectory.TaskWorkingDirectory, + }, + ResourceFiles = {new ResourceFile + { + AutoStorageContainerName = "", + StorageContainerUrl = "", + HttpUrl = "", + BlobPrefix = "", + FilePath = "", + FileMode = "", + IdentityReference = default, + }}, + EnvironmentSettings = {new EnvironmentSetting("") + { + Value = "", + }}, + UserIdentity = new UserIdentity + { + Username = "", + 
AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, + MaxTaskRetryCount = 1234, + WaitForSuccess = true, + }, + ApplicationPackageReferences = {new BatchApplicationPackageReference("") + { + Version = "", + }}, + TaskSlotsPerNode = 1234, + TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), + UserAccounts = {new UserAccount("", "") + { + ElevationLevel = ElevationLevel.NonAdmin, + LinuxUserConfiguration = new LinuxUserConfiguration + { + Uid = 1234, + Gid = 1234, + SshPrivateKey = "", + }, + WindowsUserConfiguration = new WindowsUserConfiguration + { + LoginMode = LoginMode.Batch, + }, + }}, + Metadata = { new MetadataItem("", "") }, + MountConfiguration = {new MountConfiguration + { + AzureBlobFileSystemConfiguration = new AzureBlobFileSystemConfiguration("", "", "") + { + AccountKey = "", + SasKey = "", + BlobfuseOptions = "", + IdentityReference = default, + }, + NfsMountConfiguration = new NfsMountConfiguration("", "") + { + MountOptions = "", + }, + CifsMountConfiguration = new CifsMountConfiguration("", "", "", "") + { + MountOptions = "", + }, + AzureFileShareConfiguration = new AzureFileShareConfiguration("", "", "", "") + { + MountOptions = "", + }, + }}, + TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, + UpgradePolicy = new UpgradePolicy(UpgradeMode.Automatic) + { + AutomaticOsUpgradePolicy = new AutomaticOsUpgradePolicy + { + DisableAutomaticRollback = true, + EnableAutomaticOsUpgrade = true, + UseRollingUpgradePolicy = true, + OsRollingUpgradeDeferral = true, + }, + RollingUpgradePolicy = new RollingUpgradePolicy + { + EnableCrossZoneUpgrade = true, + MaxBatchInstancePercent = 1234, + MaxUnhealthyInstancePercent = 1234, + MaxUnhealthyUpgradedInstancePercent = 1234, + PauseTimeBetweenBatches = XmlConvert.ToTimeSpan("PT1H23M45S"), + PrioritizeUnhealthyInstances = true, + RollbackFailedInstancesOnPolicyBreach = true, + }, + }, +}; +Response response = client.CreatePool(pool, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); +]]> + + + +This sample shows how to call CreatePoolAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + id = "", + vmSize = "", +}); +Response response = await client.CreatePoolAsync(content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call CreatePoolAsync with all parameters and request content. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + id = "", + displayName = "", + vmSize = "", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "", + offer = "", + sku = "", + version = "", + virtualMachineImageId = "", + }, + nodeAgentSKUId = "", + windowsConfiguration = new + { + enableAutomaticUpdates = true, + }, + dataDisks = new object[] + { + new + { + lun = 1234, + caching = "none", + diskSizeGB = 1234, + storageAccountType = "standard_lrs", + } + }, + licenseType = "", + containerConfiguration = new + { + type = "dockerCompatible", + containerImageNames = new object[] + { + "" + }, + containerRegistries = new object[] + { + new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + } + }, + }, + diskEncryptionConfiguration = new + { + targets = new object[] + { + "osdisk" + }, + }, + nodePlacementConfiguration = new + { + policy = "regional", + }, + extensions = new object[] + { + new + { + name = "", + publisher = "", + type = "", + typeHandlerVersion = "", + autoUpgradeMinorVersion = true, + enableAutomaticUpgrade = true, + settings = new + { + key = "", + }, + protectedSettings = new + { + key = "", + }, + provisionAfterExtensions = new object[] + { + "" + }, + } + }, + osDisk = new + { + ephemeralOSDiskSettings = new + { + placement = "cachedisk", + }, + caching = "none", + diskSizeGB = 1234, + managedDisk = new + { + storageAccountType = "standard_lrs", + }, + writeAcceleratorEnabled = true, + }, + securityProfile = new + { + encryptionAtHost = true, + securityType = "trustedLaunch", + uefiSettings = new + { + secureBootEnabled = true, + vTpmEnabled = true, + }, + }, + serviceArtifactReference = new + { + id = "", + }, + }, + resizeTimeout = "PT1H23M45S", + resourceTags = new + { + key = "", + }, + targetDedicatedNodes = 1234, + targetLowPriorityNodes = 1234, + enableAutoScale = true, + autoScaleFormula = "", + autoScaleEvaluationInterval = "PT1H23M45S", + enableInterNodeCommunication = true, + networkConfiguration = new + { + subnetId = "", + dynamicVNetAssignmentScope = "none", + endpointConfiguration = new + { + inboundNATPools = new object[] + { + new + { + name = "", + protocol = "tcp", + backendPort = 1234, + frontendPortRangeStart = 1234, + frontendPortRangeEnd = 1234, + networkSecurityGroupRules = new object[] + { + new + { + priority = 1234, + access = "allow", + sourceAddressPrefix = "", + sourcePortRanges = new object[] + { + "" + }, + } + }, + } + }, + }, + publicIPAddressConfiguration = new + { + provision = "batchmanaged", + ipAddressIds = new object[] + { + "" + }, + }, + enableAcceleratedNetworking = true, + }, + startTask = new + { + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { + new + { + autoStorageContainerName = "", + storageContainerUrl = "", + httpUrl = "", + blobPrefix = "", + filePath = "", + fileMode = "", + } + }, + environmentSettings = new object[] + { + new + { + name = "", + value = "", + } + }, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { + new + { + applicationId = "", + version = "", + } + }, + 
taskSlotsPerNode = 1234, + taskSchedulingPolicy = new + { + nodeFillType = "spread", + }, + userAccounts = new object[] + { + new + { + name = "", + password = "", + elevationLevel = "nonadmin", + linuxUserConfiguration = new + { + uid = 1234, + gid = 1234, + sshPrivateKey = "", + }, + windowsUserConfiguration = new + { + loginMode = "batch", + }, + } + }, + metadata = new object[] + { + new + { + name = "", + value = "", + } + }, + mountConfiguration = new object[] + { + new + { + azureBlobFileSystemConfiguration = new + { + accountName = "", + containerName = "", + accountKey = "", + sasKey = "", + blobfuseOptions = "", + relativeMountPath = "", + }, + nfsMountConfiguration = new + { + source = "", + relativeMountPath = "", + mountOptions = "", + }, + cifsMountConfiguration = new + { + username = "", + source = "", + relativeMountPath = "", + mountOptions = "", + password = "", + }, + azureFileShareConfiguration = new + { + accountName = "", + azureFileUrl = "", + accountKey = "", + relativeMountPath = "", + mountOptions = "", + }, + } + }, + targetNodeCommunicationMode = "default", + upgradePolicy = new + { + mode = "automatic", + automaticOSUpgradePolicy = new + { + disableAutomaticRollback = true, + enableAutomaticOSUpgrade = true, + useRollingUpgradePolicy = true, + osRollingUpgradeDeferral = true, + }, + rollingUpgradePolicy = new + { + enableCrossZoneUpgrade = true, + maxBatchInstancePercent = 1234, + maxUnhealthyInstancePercent = 1234, + maxUnhealthyUpgradedInstancePercent = 1234, + pauseTimeBetweenBatches = "PT1H23M45S", + prioritizeUnhealthyInstances = true, + rollbackFailedInstancesOnPolicyBreach = true, + }, + }, +}); +Response response = await client.CreatePoolAsync(content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call CreatePool. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + id = "", + vmSize = "", +}); +Response response = client.CreatePool(content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call CreatePool with all parameters and request content. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + id = "", + displayName = "", + vmSize = "", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "", + offer = "", + sku = "", + version = "", + virtualMachineImageId = "", + }, + nodeAgentSKUId = "", + windowsConfiguration = new + { + enableAutomaticUpdates = true, + }, + dataDisks = new object[] + { + new + { + lun = 1234, + caching = "none", + diskSizeGB = 1234, + storageAccountType = "standard_lrs", + } + }, + licenseType = "", + containerConfiguration = new + { + type = "dockerCompatible", + containerImageNames = new object[] + { + "" + }, + containerRegistries = new object[] + { + new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + } + }, + }, + diskEncryptionConfiguration = new + { + targets = new object[] + { + "osdisk" + }, + }, + nodePlacementConfiguration = new + { + policy = "regional", + }, + extensions = new object[] + { + new + { + name = "", + publisher = "", + type = "", + typeHandlerVersion = "", + autoUpgradeMinorVersion = true, + enableAutomaticUpgrade = true, + settings = new + { + key = "", + }, + protectedSettings = new + { + key = "", + }, + provisionAfterExtensions = new object[] + { + "" + }, + } + }, + osDisk = new + { + ephemeralOSDiskSettings = new + { + placement = "cachedisk", + }, + caching = "none", + diskSizeGB = 1234, + managedDisk = new + { + storageAccountType = "standard_lrs", + }, + writeAcceleratorEnabled = true, + }, + securityProfile = new + { + encryptionAtHost = true, + securityType = "trustedLaunch", + uefiSettings = new + { + secureBootEnabled = true, + vTpmEnabled = true, + }, + }, + serviceArtifactReference = new + { + id = "", + }, + }, + resizeTimeout = "PT1H23M45S", + resourceTags = new + { + key = "", + }, + targetDedicatedNodes = 1234, + targetLowPriorityNodes = 1234, + enableAutoScale = true, + autoScaleFormula = "", + autoScaleEvaluationInterval = "PT1H23M45S", + enableInterNodeCommunication = true, + networkConfiguration = new + { + subnetId = "", + dynamicVNetAssignmentScope = "none", + endpointConfiguration = new + { + inboundNATPools = new object[] + { + new + { + name = "", + protocol = "tcp", + backendPort = 1234, + frontendPortRangeStart = 1234, + frontendPortRangeEnd = 1234, + networkSecurityGroupRules = new object[] + { + new + { + priority = 1234, + access = "allow", + sourceAddressPrefix = "", + sourcePortRanges = new object[] + { + "" + }, + } + }, + } + }, + }, + publicIPAddressConfiguration = new + { + provision = "batchmanaged", + ipAddressIds = new object[] + { + "" + }, + }, + enableAcceleratedNetworking = true, + }, + startTask = new + { + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { + new + { + autoStorageContainerName = "", + storageContainerUrl = "", + httpUrl = "", + blobPrefix = "", + filePath = "", + fileMode = "", + } + }, + environmentSettings = new object[] + { + new + { + name = "", + value = "", + } + }, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { + new + { + applicationId = "", + version = "", + } + }, + 
taskSlotsPerNode = 1234, + taskSchedulingPolicy = new + { + nodeFillType = "spread", + }, + userAccounts = new object[] + { + new + { + name = "", + password = "", + elevationLevel = "nonadmin", + linuxUserConfiguration = new + { + uid = 1234, + gid = 1234, + sshPrivateKey = "", + }, + windowsUserConfiguration = new + { + loginMode = "batch", + }, + } + }, + metadata = new object[] + { + new + { + name = "", + value = "", + } + }, + mountConfiguration = new object[] + { + new + { + azureBlobFileSystemConfiguration = new + { + accountName = "", + containerName = "", + accountKey = "", + sasKey = "", + blobfuseOptions = "", + relativeMountPath = "", + }, + nfsMountConfiguration = new + { + source = "", + relativeMountPath = "", + mountOptions = "", + }, + cifsMountConfiguration = new + { + username = "", + source = "", + relativeMountPath = "", + mountOptions = "", + password = "", + }, + azureFileShareConfiguration = new + { + accountName = "", + azureFileUrl = "", + accountKey = "", + relativeMountPath = "", + mountOptions = "", + }, + } + }, + targetNodeCommunicationMode = "default", + upgradePolicy = new + { + mode = "automatic", + automaticOSUpgradePolicy = new + { + disableAutomaticRollback = true, + enableAutomaticOSUpgrade = true, + useRollingUpgradePolicy = true, + osRollingUpgradeDeferral = true, + }, + rollingUpgradePolicy = new + { + enableCrossZoneUpgrade = true, + maxBatchInstancePercent = 1234, + maxUnhealthyInstancePercent = 1234, + maxUnhealthyUpgradedInstancePercent = 1234, + pauseTimeBetweenBatches = "PT1H23M45S", + prioritizeUnhealthyInstances = true, + rollbackFailedInstancesOnPolicyBreach = true, + }, + }, +}); +Response response = client.CreatePool(content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call DeletePoolAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.DeletePoolAsync(""); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call DeletePoolAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.DeletePoolAsync("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call DeletePool. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.DeletePool(""); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call DeletePool with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.DeletePool("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call GetPoolAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetPoolAsync(""); +]]> +This sample shows how to call GetPoolAsync with all parameters. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetPoolAsync("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), select: new string[] { "" }, expand: new string[] { "" }, requestConditions: null); +]]> + + + +This sample shows how to call GetPoolAsync and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetPoolAsync("", null, null, null, null, null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.ToString()); +]]> +This sample shows how to call GetPoolAsync with all parameters and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetPoolAsync("", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), new string[] { "" }, new string[] { "" }, null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("id").ToString()); +Console.WriteLine(result.GetProperty("displayName").ToString()); +Console.WriteLine(result.GetProperty("url").ToString()); +Console.WriteLine(result.GetProperty("eTag").ToString()); +Console.WriteLine(result.GetProperty("lastModified").ToString()); +Console.WriteLine(result.GetProperty("creationTime").ToString()); +Console.WriteLine(result.GetProperty("state").ToString()); +Console.WriteLine(result.GetProperty("stateTransitionTime").ToString()); +Console.WriteLine(result.GetProperty("allocationState").ToString()); +Console.WriteLine(result.GetProperty("allocationStateTransitionTime").ToString()); +Console.WriteLine(result.GetProperty("vmSize").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("publisher").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("offer").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("sku").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("version").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("virtualMachineImageId").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("exactVersion").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("nodeAgentSKUId").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("windowsConfiguration").GetProperty("enableAutomaticUpdates").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("lun").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("caching").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("diskSizeGB").ToString()); 
+Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("storageAccountType").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("licenseType").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("type").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerImageNames")[0].ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("username").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("password").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("registryServer").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("diskEncryptionConfiguration").GetProperty("targets")[0].ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("nodePlacementConfiguration").GetProperty("policy").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("publisher").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("type").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("typeHandlerVersion").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("autoUpgradeMinorVersion").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("enableAutomaticUpgrade").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("settings").GetProperty("").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("protectedSettings").GetProperty("").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("provisionAfterExtensions")[0].ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("ephemeralOSDiskSettings").GetProperty("placement").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("caching").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("diskSizeGB").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("managedDisk").GetProperty("storageAccountType").ToString()); 
+Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("writeAcceleratorEnabled").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("encryptionAtHost").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("securityType").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("uefiSettings").GetProperty("secureBootEnabled").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("uefiSettings").GetProperty("vTpmEnabled").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("serviceArtifactReference").GetProperty("id").ToString()); +Console.WriteLine(result.GetProperty("resizeTimeout").ToString()); +Console.WriteLine(result.GetProperty("resizeErrors")[0].GetProperty("code").ToString()); +Console.WriteLine(result.GetProperty("resizeErrors")[0].GetProperty("message").ToString()); +Console.WriteLine(result.GetProperty("resizeErrors")[0].GetProperty("values")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("resizeErrors")[0].GetProperty("values")[0].GetProperty("value").ToString()); +Console.WriteLine(result.GetProperty("resourceTags").GetProperty("").ToString()); +Console.WriteLine(result.GetProperty("currentDedicatedNodes").ToString()); +Console.WriteLine(result.GetProperty("currentLowPriorityNodes").ToString()); +Console.WriteLine(result.GetProperty("targetDedicatedNodes").ToString()); +Console.WriteLine(result.GetProperty("targetLowPriorityNodes").ToString()); +Console.WriteLine(result.GetProperty("enableAutoScale").ToString()); +Console.WriteLine(result.GetProperty("autoScaleFormula").ToString()); +Console.WriteLine(result.GetProperty("autoScaleEvaluationInterval").ToString()); +Console.WriteLine(result.GetProperty("autoScaleRun").GetProperty("timestamp").ToString()); +Console.WriteLine(result.GetProperty("autoScaleRun").GetProperty("results").ToString()); +Console.WriteLine(result.GetProperty("autoScaleRun").GetProperty("error").GetProperty("code").ToString()); +Console.WriteLine(result.GetProperty("autoScaleRun").GetProperty("error").GetProperty("message").ToString()); +Console.WriteLine(result.GetProperty("autoScaleRun").GetProperty("error").GetProperty("values")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("autoScaleRun").GetProperty("error").GetProperty("values")[0].GetProperty("value").ToString()); +Console.WriteLine(result.GetProperty("enableInterNodeCommunication").ToString()); +Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("subnetId").ToString()); +Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("dynamicVNetAssignmentScope").ToString()); +Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("protocol").ToString()); +Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("backendPort").ToString()); 
+Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("frontendPortRangeStart").ToString()); +Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("frontendPortRangeEnd").ToString()); +Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("priority").ToString()); +Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("access").ToString()); +Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("sourceAddressPrefix").ToString()); +Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("sourcePortRanges")[0].ToString()); +Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("publicIPAddressConfiguration").GetProperty("provision").ToString()); +Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("publicIPAddressConfiguration").GetProperty("ipAddressIds")[0].ToString()); +Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("enableAcceleratedNetworking").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("commandLine").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); 
+Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("userIdentity").GetProperty("username").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("maxTaskRetryCount").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("waitForSuccess").ToString()); +Console.WriteLine(result.GetProperty("applicationPackageReferences")[0].GetProperty("applicationId").ToString()); +Console.WriteLine(result.GetProperty("applicationPackageReferences")[0].GetProperty("version").ToString()); +Console.WriteLine(result.GetProperty("taskSlotsPerNode").ToString()); +Console.WriteLine(result.GetProperty("taskSchedulingPolicy").GetProperty("nodeFillType").ToString()); +Console.WriteLine(result.GetProperty("userAccounts")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("userAccounts")[0].GetProperty("password").ToString()); +Console.WriteLine(result.GetProperty("userAccounts")[0].GetProperty("elevationLevel").ToString()); +Console.WriteLine(result.GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("uid").ToString()); +Console.WriteLine(result.GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("gid").ToString()); +Console.WriteLine(result.GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("sshPrivateKey").ToString()); +Console.WriteLine(result.GetProperty("userAccounts")[0].GetProperty("windowsUserConfiguration").GetProperty("loginMode").ToString()); +Console.WriteLine(result.GetProperty("metadata")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("metadata")[0].GetProperty("value").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("url").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("startTime").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("lastUpdateTime").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("usageStats").GetProperty("startTime").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("usageStats").GetProperty("lastUpdateTime").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("usageStats").GetProperty("dedicatedCoreTime").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("startTime").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("lastUpdateTime").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("avgCPUPercentage").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("avgMemoryGiB").ToString()); 
+Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("peakMemoryGiB").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("avgDiskGiB").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("peakDiskGiB").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("diskReadIOps").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("diskWriteIOps").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("diskReadGiB").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("diskWriteGiB").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("networkReadGiB").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("networkWriteGiB").ToString()); +Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("accountName").ToString()); +Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("containerName").ToString()); +Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("accountKey").ToString()); +Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("sasKey").ToString()); +Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("blobfuseOptions").ToString()); +Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("relativeMountPath").ToString()); +Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("identityReference").GetProperty("resourceId").ToString()); +Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("source").ToString()); +Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("relativeMountPath").ToString()); +Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("mountOptions").ToString()); +Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("username").ToString()); +Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("source").ToString()); +Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("relativeMountPath").ToString()); +Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("mountOptions").ToString()); +Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("password").ToString()); +Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("accountName").ToString()); 
+Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("azureFileUrl").ToString()); +Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("accountKey").ToString()); +Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("relativeMountPath").ToString()); +Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("mountOptions").ToString()); +Console.WriteLine(result.GetProperty("identity").GetProperty("type").ToString()); +Console.WriteLine(result.GetProperty("identity").GetProperty("userAssignedIdentities")[0].GetProperty("resourceId").ToString()); +Console.WriteLine(result.GetProperty("identity").GetProperty("userAssignedIdentities")[0].GetProperty("clientId").ToString()); +Console.WriteLine(result.GetProperty("identity").GetProperty("userAssignedIdentities")[0].GetProperty("principalId").ToString()); +Console.WriteLine(result.GetProperty("targetNodeCommunicationMode").ToString()); +Console.WriteLine(result.GetProperty("currentNodeCommunicationMode").ToString()); +Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("mode").ToString()); +Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("disableAutomaticRollback").ToString()); +Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("enableAutomaticOSUpgrade").ToString()); +Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("useRollingUpgradePolicy").ToString()); +Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("osRollingUpgradeDeferral").ToString()); +Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("enableCrossZoneUpgrade").ToString()); +Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxBatchInstancePercent").ToString()); +Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxUnhealthyInstancePercent").ToString()); +Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxUnhealthyUpgradedInstancePercent").ToString()); +Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("pauseTimeBetweenBatches").ToString()); +Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("prioritizeUnhealthyInstances").ToString()); +Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("rollbackFailedInstancesOnPolicyBreach").ToString()); +]]> + + + +This sample shows how to call UpdatePoolAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new object()); +Response response = await client.UpdatePoolAsync("", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call UpdatePoolAsync with all parameters and request content. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + startTask = new + { + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + registry = new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + }, + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { + new + { + autoStorageContainerName = "", + storageContainerUrl = "", + httpUrl = "", + blobPrefix = "", + filePath = "", + fileMode = "", + } + }, + environmentSettings = new object[] + { + new + { + name = "", + value = "", + } + }, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { + new + { + applicationId = "", + version = "", + } + }, + metadata = new object[] + { + new + { + name = "", + value = "", + } + }, + targetNodeCommunicationMode = "default", +}); +Response response = await client.UpdatePoolAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call UpdatePool. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new object()); +Response response = client.UpdatePool("", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call UpdatePool with all parameters and request content. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + startTask = new + { + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + registry = new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + }, + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { + new + { + autoStorageContainerName = "", + storageContainerUrl = "", + httpUrl = "", + blobPrefix = "", + filePath = "", + fileMode = "", + } + }, + environmentSettings = new object[] + { + new + { + name = "", + value = "", + } + }, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { + new + { + applicationId = "", + version = "", + } + }, + metadata = new object[] + { + new + { + name = "", + value = "", + } + }, + targetNodeCommunicationMode = "default", +}); +Response response = client.UpdatePool("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call DisablePoolAutoScaleAsync. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.DisablePoolAutoScaleAsync(""); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call DisablePoolAutoScaleAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.DisablePoolAutoScaleAsync("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call DisablePoolAutoScale. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.DisablePoolAutoScale(""); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call DisablePoolAutoScale with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.DisablePoolAutoScale("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call EnablePoolAutoScaleAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchPoolEnableAutoScaleContent content = new BatchPoolEnableAutoScaleContent(); +Response response = await client.EnablePoolAutoScaleAsync("", content); +]]> +This sample shows how to call EnablePoolAutoScaleAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchPoolEnableAutoScaleContent content = new BatchPoolEnableAutoScaleContent +{ + AutoScaleFormula = "", + AutoScaleEvaluationInterval = XmlConvert.ToTimeSpan("PT1H23M45S"), +}; +Response response = await client.EnablePoolAutoScaleAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); +]]> + + + +This sample shows how to call EnablePoolAutoScale. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchPoolEnableAutoScaleContent content = new BatchPoolEnableAutoScaleContent(); +Response response = client.EnablePoolAutoScale("", content); +]]> +This sample shows how to call EnablePoolAutoScale with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchPoolEnableAutoScaleContent content = new BatchPoolEnableAutoScaleContent +{ + AutoScaleFormula = "", + AutoScaleEvaluationInterval = XmlConvert.ToTimeSpan("PT1H23M45S"), +}; +Response response = client.EnablePoolAutoScale("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); +]]> + + + +This sample shows how to call EnablePoolAutoScaleAsync. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new object()); +Response response = await client.EnablePoolAutoScaleAsync("", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call EnablePoolAutoScaleAsync with all parameters and request content. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + autoScaleFormula = "", + autoScaleEvaluationInterval = "PT1H23M45S", +}); +Response response = await client.EnablePoolAutoScaleAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call EnablePoolAutoScale. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new object()); +Response response = client.EnablePoolAutoScale("", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call EnablePoolAutoScale with all parameters and request content. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + autoScaleFormula = "", + autoScaleEvaluationInterval = "PT1H23M45S", +}); +Response response = client.EnablePoolAutoScale("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call EvaluatePoolAutoScaleAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchPoolEvaluateAutoScaleContent content = new BatchPoolEvaluateAutoScaleContent(""); +Response response = await client.EvaluatePoolAutoScaleAsync("", content); +]]> +This sample shows how to call EvaluatePoolAutoScaleAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchPoolEvaluateAutoScaleContent content = new BatchPoolEvaluateAutoScaleContent(""); +Response response = await client.EvaluatePoolAutoScaleAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); +]]> + + + +This sample shows how to call EvaluatePoolAutoScale. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchPoolEvaluateAutoScaleContent content = new BatchPoolEvaluateAutoScaleContent(""); +Response response = client.EvaluatePoolAutoScale("", content); +]]> +This sample shows how to call EvaluatePoolAutoScale with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchPoolEvaluateAutoScaleContent content = new BatchPoolEvaluateAutoScaleContent(""); +Response response = client.EvaluatePoolAutoScale("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); +]]> + + + +This sample shows how to call EvaluatePoolAutoScaleAsync and parse the result. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + autoScaleFormula = "", +}); +Response response = await client.EvaluatePoolAutoScaleAsync("", content); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("timestamp").ToString()); +]]> +This sample shows how to call EvaluatePoolAutoScaleAsync with all parameters and request content and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + autoScaleFormula = "", +}); +Response response = await client.EvaluatePoolAutoScaleAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("timestamp").ToString()); +Console.WriteLine(result.GetProperty("results").ToString()); +Console.WriteLine(result.GetProperty("error").GetProperty("code").ToString()); +Console.WriteLine(result.GetProperty("error").GetProperty("message").ToString()); +Console.WriteLine(result.GetProperty("error").GetProperty("values")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("error").GetProperty("values")[0].GetProperty("value").ToString()); +]]> + + + +This sample shows how to call EvaluatePoolAutoScale and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + autoScaleFormula = "", +}); +Response response = client.EvaluatePoolAutoScale("", content); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("timestamp").ToString()); +]]> +This sample shows how to call EvaluatePoolAutoScale with all parameters and request content and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + autoScaleFormula = "", +}); +Response response = client.EvaluatePoolAutoScale("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("timestamp").ToString()); +Console.WriteLine(result.GetProperty("results").ToString()); +Console.WriteLine(result.GetProperty("error").GetProperty("code").ToString()); +Console.WriteLine(result.GetProperty("error").GetProperty("message").ToString()); +Console.WriteLine(result.GetProperty("error").GetProperty("values")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("error").GetProperty("values")[0].GetProperty("value").ToString()); +]]> + + + +This sample shows how to call ResizePoolAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchPoolResizeContent content = new BatchPoolResizeContent(); +Response response = await client.ResizePoolAsync("", content); +]]> +This sample shows how to call ResizePoolAsync with all parameters. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchPoolResizeContent content = new BatchPoolResizeContent +{ + TargetDedicatedNodes = 1234, + TargetLowPriorityNodes = 1234, + ResizeTimeout = XmlConvert.ToTimeSpan("PT1H23M45S"), + NodeDeallocationOption = BatchNodeDeallocationOption.Requeue, +}; +Response response = await client.ResizePoolAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); +]]> + + + +This sample shows how to call ResizePool. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchPoolResizeContent content = new BatchPoolResizeContent(); +Response response = client.ResizePool("", content); +]]> +This sample shows how to call ResizePool with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchPoolResizeContent content = new BatchPoolResizeContent +{ + TargetDedicatedNodes = 1234, + TargetLowPriorityNodes = 1234, + ResizeTimeout = XmlConvert.ToTimeSpan("PT1H23M45S"), + NodeDeallocationOption = BatchNodeDeallocationOption.Requeue, +}; +Response response = client.ResizePool("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); +]]> + + + +This sample shows how to call ResizePoolAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new object()); +Response response = await client.ResizePoolAsync("", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call ResizePoolAsync with all parameters and request content. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + targetDedicatedNodes = 1234, + targetLowPriorityNodes = 1234, + resizeTimeout = "PT1H23M45S", + nodeDeallocationOption = "requeue", +}); +Response response = await client.ResizePoolAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call ResizePool. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new object()); +Response response = client.ResizePool("", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call ResizePool with all parameters and request content. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + targetDedicatedNodes = 1234, + targetLowPriorityNodes = 1234, + resizeTimeout = "PT1H23M45S", + nodeDeallocationOption = "requeue", +}); +Response response = client.ResizePool("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call StopPoolResizeAsync. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.StopPoolResizeAsync(""); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call StopPoolResizeAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.StopPoolResizeAsync("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call StopPoolResize. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.StopPoolResize(""); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call StopPoolResize with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.StopPoolResize("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call ReplacePoolPropertiesAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchPoolReplaceContent pool = new BatchPoolReplaceContent(new BatchApplicationPackageReference[] +{ + new BatchApplicationPackageReference("") +}, new MetadataItem[] +{ + new MetadataItem("", "") +}); +Response response = await client.ReplacePoolPropertiesAsync("", pool); +]]> +This sample shows how to call ReplacePoolPropertiesAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchPoolReplaceContent pool = new BatchPoolReplaceContent(new BatchApplicationPackageReference[] +{ + new BatchApplicationPackageReference("") + { + Version = "", + } +}, new MetadataItem[] +{ + new MetadataItem("", "") +}) +{ + StartTask = new BatchStartTask("") + { + ContainerSettings = new BatchTaskContainerSettings("") + { + ContainerRunOptions = "", + Registry = new ContainerRegistryReference + { + Username = "", + Password = "", + RegistryServer = "", + IdentityReference = new BatchNodeIdentityReference + { + ResourceId = "", + }, + }, + WorkingDirectory = ContainerWorkingDirectory.TaskWorkingDirectory, + }, + ResourceFiles = {new ResourceFile + { + AutoStorageContainerName = "", + StorageContainerUrl = "", + HttpUrl = "", + BlobPrefix = "", + FilePath = "", + FileMode = "", + IdentityReference = default, + }}, + EnvironmentSettings = {new EnvironmentSetting("") + { + Value = "", + }}, + UserIdentity = new UserIdentity + { + Username = "", + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, + MaxTaskRetryCount = 1234, + WaitForSuccess = true, + }, + TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, +}; +Response response = await client.ReplacePoolPropertiesAsync("", pool, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); +]]> + + + +This sample shows how to call ReplacePoolProperties. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchPoolReplaceContent pool = new BatchPoolReplaceContent(new BatchApplicationPackageReference[] +{ + new BatchApplicationPackageReference("") +}, new MetadataItem[] +{ + new MetadataItem("", "") +}); +Response response = client.ReplacePoolProperties("", pool); +]]> +This sample shows how to call ReplacePoolProperties with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchPoolReplaceContent pool = new BatchPoolReplaceContent(new BatchApplicationPackageReference[] +{ + new BatchApplicationPackageReference("") + { + Version = "", + } +}, new MetadataItem[] +{ + new MetadataItem("", "") +}) +{ + StartTask = new BatchStartTask("") + { + ContainerSettings = new BatchTaskContainerSettings("") + { + ContainerRunOptions = "", + Registry = new ContainerRegistryReference + { + Username = "", + Password = "", + RegistryServer = "", + IdentityReference = new BatchNodeIdentityReference + { + ResourceId = "", + }, + }, + WorkingDirectory = ContainerWorkingDirectory.TaskWorkingDirectory, + }, + ResourceFiles = {new ResourceFile + { + AutoStorageContainerName = "", + StorageContainerUrl = "", + HttpUrl = "", + BlobPrefix = "", + FilePath = "", + FileMode = "", + IdentityReference = default, + }}, + EnvironmentSettings = {new EnvironmentSetting("") + { + Value = "", + }}, + UserIdentity = new UserIdentity + { + Username = "", + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, + MaxTaskRetryCount = 1234, + WaitForSuccess = true, + }, + TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, +}; +Response response = client.ReplacePoolProperties("", pool, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); +]]> + + + +This sample shows how to call ReplacePoolPropertiesAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + applicationPackageReferences = new object[] + { + new + { + applicationId = "", + } + }, + metadata = new object[] + { + new + { + name = "", + value = "", + } + }, +}); +Response response = await client.ReplacePoolPropertiesAsync("", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call ReplacePoolPropertiesAsync with all parameters and request content. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + startTask = new + { + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + registry = new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + }, + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { + new + { + autoStorageContainerName = "", + storageContainerUrl = "", + httpUrl = "", + blobPrefix = "", + filePath = "", + fileMode = "", + } + }, + environmentSettings = new object[] + { + new + { + name = "", + value = "", + } + }, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { + new + { + applicationId = "", + version = "", + } + }, + metadata = new object[] + { + new + { + name = "", + value = "", + } + }, + targetNodeCommunicationMode = "default", +}); +Response response = await client.ReplacePoolPropertiesAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call ReplacePoolProperties. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + applicationPackageReferences = new object[] + { + new + { + applicationId = "", + } + }, + metadata = new object[] + { + new + { + name = "", + value = "", + } + }, +}); +Response response = client.ReplacePoolProperties("", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call ReplacePoolProperties with all parameters and request content. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + startTask = new + { + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + registry = new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + }, + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { + new + { + autoStorageContainerName = "", + storageContainerUrl = "", + httpUrl = "", + blobPrefix = "", + filePath = "", + fileMode = "", + } + }, + environmentSettings = new object[] + { + new + { + name = "", + value = "", + } + }, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { + new + { + applicationId = "", + version = "", + } + }, + metadata = new object[] + { + new + { + name = "", + value = "", + } + }, + targetNodeCommunicationMode = "default", +}); +Response response = client.ReplacePoolProperties("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call RemoveNodesAsync. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchNodeRemoveContent content = new BatchNodeRemoveContent(new string[] { "" }); +Response response = await client.RemoveNodesAsync("", content); +]]> +This sample shows how to call RemoveNodesAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchNodeRemoveContent content = new BatchNodeRemoveContent(new string[] { "" }) +{ + ResizeTimeout = XmlConvert.ToTimeSpan("PT1H23M45S"), + NodeDeallocationOption = BatchNodeDeallocationOption.Requeue, +}; +Response response = await client.RemoveNodesAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); +]]> + + + +This sample shows how to call RemoveNodes. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchNodeRemoveContent content = new BatchNodeRemoveContent(new string[] { "" }); +Response response = client.RemoveNodes("", content); +]]> +This sample shows how to call RemoveNodes with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchNodeRemoveContent content = new BatchNodeRemoveContent(new string[] { "" }) +{ + ResizeTimeout = XmlConvert.ToTimeSpan("PT1H23M45S"), + NodeDeallocationOption = BatchNodeDeallocationOption.Requeue, +}; +Response response = client.RemoveNodes("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); +]]> + + + +This sample shows how to call RemoveNodesAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + nodeList = new object[] + { + "" + }, +}); +Response response = await client.RemoveNodesAsync("", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call RemoveNodesAsync with all parameters and request content. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + nodeList = new object[] + { + "" + }, + resizeTimeout = "PT1H23M45S", + nodeDeallocationOption = "requeue", +}); +Response response = await client.RemoveNodesAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call RemoveNodes. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + nodeList = new object[] + { + "" + }, +}); +Response response = client.RemoveNodes("", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call RemoveNodes with all parameters and request content. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + nodeList = new object[] + { + "" + }, + resizeTimeout = "PT1H23M45S", + nodeDeallocationOption = "requeue", +}); +Response response = client.RemoveNodes("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call DeleteJobAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.DeleteJobAsync(""); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call DeleteJobAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.DeleteJobAsync("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call DeleteJob. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.DeleteJob(""); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call DeleteJob with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.DeleteJob("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call GetJobAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetJobAsync(""); +]]> +This sample shows how to call GetJobAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetJobAsync("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), select: new string[] { "" }, expand: new string[] { "" }, requestConditions: null); +]]> + + + +This sample shows how to call GetJobAsync and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetJobAsync("", null, null, null, null, null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("poolInfo").ToString()); +]]> +This sample shows how to call GetJobAsync with all parameters and parse the result. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetJobAsync("", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), new string[] { "" }, new string[] { "" }, null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("id").ToString()); +Console.WriteLine(result.GetProperty("displayName").ToString()); +Console.WriteLine(result.GetProperty("usesTaskDependencies").ToString()); +Console.WriteLine(result.GetProperty("url").ToString()); +Console.WriteLine(result.GetProperty("eTag").ToString()); +Console.WriteLine(result.GetProperty("lastModified").ToString()); +Console.WriteLine(result.GetProperty("creationTime").ToString()); +Console.WriteLine(result.GetProperty("state").ToString()); +Console.WriteLine(result.GetProperty("stateTransitionTime").ToString()); +Console.WriteLine(result.GetProperty("previousState").ToString()); +Console.WriteLine(result.GetProperty("previousStateTransitionTime").ToString()); +Console.WriteLine(result.GetProperty("priority").ToString()); +Console.WriteLine(result.GetProperty("allowTaskPreemption").ToString()); +Console.WriteLine(result.GetProperty("maxParallelTasks").ToString()); +Console.WriteLine(result.GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); +Console.WriteLine(result.GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("id").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("displayName").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("commandLine").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); 
+Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("filePattern").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("path").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("containerUrl").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("identityReference").GetProperty("resourceId").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("uploadHeaders")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("uploadHeaders")[0].GetProperty("value").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("uploadOptions").GetProperty("uploadCondition").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("constraints").GetProperty("retentionTime").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("requiredSlots").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("killJobOnCompletion").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("userIdentity").GetProperty("username").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("runExclusive").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("applicationPackageReferences")[0].GetProperty("applicationId").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("applicationPackageReferences")[0].GetProperty("version").ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("authenticationTokenSettings").GetProperty("access")[0].ToString()); +Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("allowLowPriorityNode").ToString()); +Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("id").ToString()); 
+Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("commandLine").ToString()); +Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); +Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); +Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); +Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); +Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); +Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); +Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); +Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); +Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); +Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); +Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); +Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); +Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); +Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); +Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); +Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); +Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("constraints").GetProperty("retentionTime").ToString()); +Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); +Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("waitForSuccess").ToString()); +Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("userIdentity").GetProperty("username").ToString()); +Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); +Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); +Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("rerunOnNodeRebootAfterSuccess").ToString()); +Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("id").ToString()); 
+Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("commandLine").ToString()); +Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); +Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); +Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); +Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); +Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); +Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); +Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); +Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); +Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); +Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); +Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); +Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); +Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); +Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); +Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); +Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("maxWallClockTime").ToString()); +Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("retentionTime").ToString()); +Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("userIdentity").GetProperty("username").ToString()); +Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); +Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); +Console.WriteLine(result.GetProperty("commonEnvironmentSettings")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("commonEnvironmentSettings")[0].GetProperty("value").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("poolId").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("autoPoolIdPrefix").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("poolLifetimeOption").ToString()); 
+Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("keepAlive").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("displayName").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("vmSize").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("publisher").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("offer").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("sku").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("version").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("virtualMachineImageId").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("exactVersion").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("nodeAgentSKUId").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("windowsConfiguration").GetProperty("enableAutomaticUpdates").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("lun").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("caching").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("diskSizeGB").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("storageAccountType").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("licenseType").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("type").ToString()); 
+Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerImageNames")[0].ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("username").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("password").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("registryServer").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("diskEncryptionConfiguration").GetProperty("targets")[0].ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("nodePlacementConfiguration").GetProperty("policy").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("publisher").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("type").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("typeHandlerVersion").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("autoUpgradeMinorVersion").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("enableAutomaticUpgrade").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("settings").GetProperty("").ToString()); 
+Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("protectedSettings").GetProperty("").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("provisionAfterExtensions")[0].ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("ephemeralOSDiskSettings").GetProperty("placement").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("caching").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("diskSizeGB").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("managedDisk").GetProperty("storageAccountType").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("writeAcceleratorEnabled").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("encryptionAtHost").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("securityType").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("uefiSettings").GetProperty("secureBootEnabled").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("uefiSettings").GetProperty("vTpmEnabled").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("serviceArtifactReference").GetProperty("id").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("taskSlotsPerNode").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("taskSchedulingPolicy").GetProperty("nodeFillType").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("resizeTimeout").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("resourceTags").ToString()); 
+Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("targetDedicatedNodes").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("targetLowPriorityNodes").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("enableAutoScale").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("autoScaleFormula").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("autoScaleEvaluationInterval").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("enableInterNodeCommunication").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("subnetId").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("dynamicVNetAssignmentScope").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("protocol").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("backendPort").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("frontendPortRangeStart").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("frontendPortRangeEnd").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("priority").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("access").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("sourceAddressPrefix").ToString()); 
+Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("sourcePortRanges")[0].ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("publicIPAddressConfiguration").GetProperty("provision").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("publicIPAddressConfiguration").GetProperty("ipAddressIds")[0].ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("enableAcceleratedNetworking").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("commandLine").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); 
+Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("userIdentity").GetProperty("username").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("maxTaskRetryCount").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("waitForSuccess").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("applicationPackageReferences")[0].GetProperty("applicationId").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("applicationPackageReferences")[0].GetProperty("version").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("password").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("elevationLevel").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("uid").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("gid").ToString()); 
+Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("sshPrivateKey").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("windowsUserConfiguration").GetProperty("loginMode").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("metadata")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("metadata")[0].GetProperty("value").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("accountName").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("containerName").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("accountKey").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("sasKey").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("blobfuseOptions").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("relativeMountPath").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("identityReference").GetProperty("resourceId").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("source").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("relativeMountPath").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("mountOptions").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("username").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("source").ToString()); 
+Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("relativeMountPath").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("mountOptions").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("password").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("accountName").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("azureFileUrl").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("accountKey").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("relativeMountPath").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("mountOptions").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("targetNodeCommunicationMode").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("mode").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("disableAutomaticRollback").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("enableAutomaticOSUpgrade").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("useRollingUpgradePolicy").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("osRollingUpgradeDeferral").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("enableCrossZoneUpgrade").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxBatchInstancePercent").ToString()); 
+Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxUnhealthyInstancePercent").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxUnhealthyUpgradedInstancePercent").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("pauseTimeBetweenBatches").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("prioritizeUnhealthyInstances").ToString()); +Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("rollbackFailedInstancesOnPolicyBreach").ToString()); +Console.WriteLine(result.GetProperty("onAllTasksComplete").ToString()); +Console.WriteLine(result.GetProperty("onTaskFailure").ToString()); +Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("subnetId").ToString()); +Console.WriteLine(result.GetProperty("metadata")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("metadata")[0].GetProperty("value").ToString()); +Console.WriteLine(result.GetProperty("executionInfo").GetProperty("startTime").ToString()); +Console.WriteLine(result.GetProperty("executionInfo").GetProperty("endTime").ToString()); +Console.WriteLine(result.GetProperty("executionInfo").GetProperty("poolId").ToString()); +Console.WriteLine(result.GetProperty("executionInfo").GetProperty("schedulingError").GetProperty("category").ToString()); +Console.WriteLine(result.GetProperty("executionInfo").GetProperty("schedulingError").GetProperty("code").ToString()); +Console.WriteLine(result.GetProperty("executionInfo").GetProperty("schedulingError").GetProperty("message").ToString()); +Console.WriteLine(result.GetProperty("executionInfo").GetProperty("schedulingError").GetProperty("details")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("executionInfo").GetProperty("schedulingError").GetProperty("details")[0].GetProperty("value").ToString()); +Console.WriteLine(result.GetProperty("executionInfo").GetProperty("terminateReason").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("url").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("startTime").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("lastUpdateTime").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("userCPUTime").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("kernelCPUTime").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("wallClockTime").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("readIOps").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("writeIOps").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("readIOGiB").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("writeIOGiB").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("numSucceededTasks").ToString()); 
+Console.WriteLine(result.GetProperty("stats").GetProperty("numFailedTasks").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("numTaskRetries").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("waitTime").ToString()); +]]> + + + +This sample shows how to call UpdateJobAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new object()); +Response response = await client.UpdateJobAsync("", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call UpdateJobAsync with all parameters and request content. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + priority = 1234, + allowTaskPreemption = true, + maxParallelTasks = 1234, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + poolInfo = new + { + poolId = "", + autoPoolSpecification = new + { + autoPoolIdPrefix = "", + poolLifetimeOption = "jobschedule", + keepAlive = true, + pool = new + { + displayName = "", + vmSize = "", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "", + offer = "", + sku = "", + version = "", + virtualMachineImageId = "", + }, + nodeAgentSKUId = "", + windowsConfiguration = new + { + enableAutomaticUpdates = true, + }, + dataDisks = new object[] + { + new + { + lun = 1234, + caching = "none", + diskSizeGB = 1234, + storageAccountType = "standard_lrs", + } + }, + licenseType = "", + containerConfiguration = new + { + type = "dockerCompatible", + containerImageNames = new object[] + { + "" + }, + containerRegistries = new object[] + { + new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + } + }, + }, + diskEncryptionConfiguration = new + { + targets = new object[] + { + "osdisk" + }, + }, + nodePlacementConfiguration = new + { + policy = "regional", + }, + extensions = new object[] + { + new + { + name = "", + publisher = "", + type = "", + typeHandlerVersion = "", + autoUpgradeMinorVersion = true, + enableAutomaticUpgrade = true, + settings = new + { + key = "", + }, + protectedSettings = new + { + key = "", + }, + provisionAfterExtensions = new object[] + { + "" + }, + } + }, + osDisk = new + { + ephemeralOSDiskSettings = new + { + placement = "cachedisk", + }, + caching = "none", + diskSizeGB = 1234, + managedDisk = new + { + storageAccountType = "standard_lrs", + }, + writeAcceleratorEnabled = true, + }, + securityProfile = new + { + encryptionAtHost = true, + securityType = "trustedLaunch", + uefiSettings = new + { + secureBootEnabled = true, + vTpmEnabled = true, + }, + }, + serviceArtifactReference = new + { + id = "", + }, + }, + taskSlotsPerNode = 1234, + taskSchedulingPolicy = new + { + nodeFillType = "spread", + }, + resizeTimeout = "PT1H23M45S", + resourceTags = "", + targetDedicatedNodes = 1234, + targetLowPriorityNodes = 1234, + enableAutoScale = true, + autoScaleFormula = "", + autoScaleEvaluationInterval = "PT1H23M45S", + enableInterNodeCommunication = true, + networkConfiguration = new + { + subnetId = "", + dynamicVNetAssignmentScope = "none", + endpointConfiguration = new + { + inboundNATPools = new object[] + { + new + { + name = "", + protocol = "tcp", + backendPort = 1234, + frontendPortRangeStart = 1234, + 
frontendPortRangeEnd = 1234, + networkSecurityGroupRules = new object[] + { + new + { + priority = 1234, + access = "allow", + sourceAddressPrefix = "", + sourcePortRanges = new object[] + { + "" + }, + } + }, + } + }, + }, + publicIPAddressConfiguration = new + { + provision = "batchmanaged", + ipAddressIds = new object[] + { + "" + }, + }, + enableAcceleratedNetworking = true, + }, + startTask = new + { + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { + new + { + autoStorageContainerName = "", + storageContainerUrl = "", + httpUrl = "", + blobPrefix = "", + filePath = "", + fileMode = "", + } + }, + environmentSettings = new object[] + { + new + { + name = "", + value = "", + } + }, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { + new + { + applicationId = "", + version = "", + } + }, + userAccounts = new object[] + { + new + { + name = "", + password = "", + elevationLevel = "nonadmin", + linuxUserConfiguration = new + { + uid = 1234, + gid = 1234, + sshPrivateKey = "", + }, + windowsUserConfiguration = new + { + loginMode = "batch", + }, + } + }, + metadata = new object[] + { + new + { + name = "", + value = "", + } + }, + mountConfiguration = new object[] + { + new + { + azureBlobFileSystemConfiguration = new + { + accountName = "", + containerName = "", + accountKey = "", + sasKey = "", + blobfuseOptions = "", + relativeMountPath = "", + }, + nfsMountConfiguration = new + { + source = "", + relativeMountPath = "", + mountOptions = "", + }, + cifsMountConfiguration = new + { + username = "", + source = "", + relativeMountPath = "", + mountOptions = "", + password = "", + }, + azureFileShareConfiguration = new + { + accountName = "", + azureFileUrl = "", + accountKey = "", + relativeMountPath = "", + mountOptions = "", + }, + } + }, + targetNodeCommunicationMode = "default", + upgradePolicy = new + { + mode = "automatic", + automaticOSUpgradePolicy = new + { + disableAutomaticRollback = true, + enableAutomaticOSUpgrade = true, + useRollingUpgradePolicy = true, + osRollingUpgradeDeferral = true, + }, + rollingUpgradePolicy = new + { + enableCrossZoneUpgrade = true, + maxBatchInstancePercent = 1234, + maxUnhealthyInstancePercent = 1234, + maxUnhealthyUpgradedInstancePercent = 1234, + pauseTimeBetweenBatches = "PT1H23M45S", + prioritizeUnhealthyInstances = true, + rollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, + }, + onAllTasksComplete = "noaction", + metadata = new object[] + { + null + }, +}); +Response response = await client.UpdateJobAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call UpdateJob. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new object()); +Response response = client.UpdateJob("", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call UpdateJob with all parameters and request content. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + priority = 1234, + allowTaskPreemption = true, + maxParallelTasks = 1234, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + poolInfo = new + { + poolId = "", + autoPoolSpecification = new + { + autoPoolIdPrefix = "", + poolLifetimeOption = "jobschedule", + keepAlive = true, + pool = new + { + displayName = "", + vmSize = "", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "", + offer = "", + sku = "", + version = "", + virtualMachineImageId = "", + }, + nodeAgentSKUId = "", + windowsConfiguration = new + { + enableAutomaticUpdates = true, + }, + dataDisks = new object[] + { + new + { + lun = 1234, + caching = "none", + diskSizeGB = 1234, + storageAccountType = "standard_lrs", + } + }, + licenseType = "", + containerConfiguration = new + { + type = "dockerCompatible", + containerImageNames = new object[] + { + "" + }, + containerRegistries = new object[] + { + new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + } + }, + }, + diskEncryptionConfiguration = new + { + targets = new object[] + { + "osdisk" + }, + }, + nodePlacementConfiguration = new + { + policy = "regional", + }, + extensions = new object[] + { + new + { + name = "", + publisher = "", + type = "", + typeHandlerVersion = "", + autoUpgradeMinorVersion = true, + enableAutomaticUpgrade = true, + settings = new + { + key = "", + }, + protectedSettings = new + { + key = "", + }, + provisionAfterExtensions = new object[] + { + "" + }, + } + }, + osDisk = new + { + ephemeralOSDiskSettings = new + { + placement = "cachedisk", + }, + caching = "none", + diskSizeGB = 1234, + managedDisk = new + { + storageAccountType = "standard_lrs", + }, + writeAcceleratorEnabled = true, + }, + securityProfile = new + { + encryptionAtHost = true, + securityType = "trustedLaunch", + uefiSettings = new + { + secureBootEnabled = true, + vTpmEnabled = true, + }, + }, + serviceArtifactReference = new + { + id = "", + }, + }, + taskSlotsPerNode = 1234, + taskSchedulingPolicy = new + { + nodeFillType = "spread", + }, + resizeTimeout = "PT1H23M45S", + resourceTags = "", + targetDedicatedNodes = 1234, + targetLowPriorityNodes = 1234, + enableAutoScale = true, + autoScaleFormula = "", + autoScaleEvaluationInterval = "PT1H23M45S", + enableInterNodeCommunication = true, + networkConfiguration = new + { + subnetId = "", + dynamicVNetAssignmentScope = "none", + endpointConfiguration = new + { + inboundNATPools = new object[] + { + new + { + name = "", + protocol = "tcp", + backendPort = 1234, + frontendPortRangeStart = 1234, + frontendPortRangeEnd = 1234, + networkSecurityGroupRules = new object[] + { + new + { + priority = 1234, + access = "allow", + sourceAddressPrefix = "", + sourcePortRanges = new object[] + { + "" + }, + } + }, + } + }, + }, + publicIPAddressConfiguration = new + { + provision = "batchmanaged", + ipAddressIds = new object[] + { + "" + }, + }, + enableAcceleratedNetworking = true, + }, + startTask = new + { + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { + new + { + autoStorageContainerName = "", + storageContainerUrl = "", + httpUrl = "", + blobPrefix = "", + filePath = "", + 
fileMode = "", + } + }, + environmentSettings = new object[] + { + new + { + name = "", + value = "", + } + }, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { + new + { + applicationId = "", + version = "", + } + }, + userAccounts = new object[] + { + new + { + name = "", + password = "", + elevationLevel = "nonadmin", + linuxUserConfiguration = new + { + uid = 1234, + gid = 1234, + sshPrivateKey = "", + }, + windowsUserConfiguration = new + { + loginMode = "batch", + }, + } + }, + metadata = new object[] + { + new + { + name = "", + value = "", + } + }, + mountConfiguration = new object[] + { + new + { + azureBlobFileSystemConfiguration = new + { + accountName = "", + containerName = "", + accountKey = "", + sasKey = "", + blobfuseOptions = "", + relativeMountPath = "", + }, + nfsMountConfiguration = new + { + source = "", + relativeMountPath = "", + mountOptions = "", + }, + cifsMountConfiguration = new + { + username = "", + source = "", + relativeMountPath = "", + mountOptions = "", + password = "", + }, + azureFileShareConfiguration = new + { + accountName = "", + azureFileUrl = "", + accountKey = "", + relativeMountPath = "", + mountOptions = "", + }, + } + }, + targetNodeCommunicationMode = "default", + upgradePolicy = new + { + mode = "automatic", + automaticOSUpgradePolicy = new + { + disableAutomaticRollback = true, + enableAutomaticOSUpgrade = true, + useRollingUpgradePolicy = true, + osRollingUpgradeDeferral = true, + }, + rollingUpgradePolicy = new + { + enableCrossZoneUpgrade = true, + maxBatchInstancePercent = 1234, + maxUnhealthyInstancePercent = 1234, + maxUnhealthyUpgradedInstancePercent = 1234, + pauseTimeBetweenBatches = "PT1H23M45S", + prioritizeUnhealthyInstances = true, + rollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, + }, + onAllTasksComplete = "noaction", + metadata = new object[] + { + null + }, +}); +Response response = client.UpdateJob("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call ReplaceJobAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchJob job = new BatchJob(new BatchPoolInfo()); +Response response = await client.ReplaceJobAsync("", job); +]]> +This sample shows how to call ReplaceJobAsync with all parameters. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchJob job = new BatchJob(new BatchPoolInfo +{ + PoolId = "", + AutoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.JobSchedule) + { + AutoPoolIdPrefix = "", + KeepAlive = true, + Pool = new BatchPoolSpecification("") + { + DisplayName = "", + VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + { + Publisher = "", + Offer = "", + Sku = "", + Version = "", + VirtualMachineImageId = "", + }, "") + { + WindowsConfiguration = new WindowsConfiguration + { + EnableAutomaticUpdates = true, + }, + DataDisks = {new DataDisk(1234, 1234) + { + Caching = CachingType.None, + StorageAccountType = StorageAccountType.StandardLRS, + }}, + LicenseType = "", + ContainerConfiguration = new ContainerConfiguration(ContainerType.DockerCompatible) + { + ContainerImageNames = { "" }, + ContainerRegistries = {new ContainerRegistryReference + { + Username = "", + Password = "", + RegistryServer = "", + IdentityReference = new BatchNodeIdentityReference + { + ResourceId = "", + }, + }}, + }, + DiskEncryptionConfiguration = new DiskEncryptionConfiguration + { + Targets = { DiskEncryptionTarget.OsDisk }, + }, + NodePlacementConfiguration = new BatchNodePlacementConfiguration + { + Policy = BatchNodePlacementPolicyType.Regional, + }, + Extensions = {new VMExtension("", "", "") + { + TypeHandlerVersion = "", + AutoUpgradeMinorVersion = true, + EnableAutomaticUpgrade = true, + Settings = + { + ["key"] = "" + }, + ProtectedSettings = + { + ["key"] = "" + }, + ProvisionAfterExtensions = {""}, + }}, + OsDisk = new OSDisk + { + EphemeralOSDiskSettings = new DiffDiskSettings + { + Placement = DiffDiskPlacement.CacheDisk, + }, + Caching = CachingType.None, + DiskSizeGB = 1234, + ManagedDisk = new ManagedDisk(StorageAccountType.StandardLRS), + WriteAcceleratorEnabled = true, + }, + SecurityProfile = new SecurityProfile(true, SecurityTypes.TrustedLaunch, new UefiSettings + { + SecureBootEnabled = true, + VTpmEnabled = true, + }), + ServiceArtifactReference = new ServiceArtifactReference(""), + }, + TaskSlotsPerNode = 1234, + TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), + ResizeTimeout = XmlConvert.ToTimeSpan("PT1H23M45S"), + ResourceTags = "", + TargetDedicatedNodes = 1234, + TargetLowPriorityNodes = 1234, + EnableAutoScale = true, + AutoScaleFormula = "", + AutoScaleEvaluationInterval = XmlConvert.ToTimeSpan("PT1H23M45S"), + EnableInterNodeCommunication = true, + NetworkConfiguration = new NetworkConfiguration + { + SubnetId = "", + DynamicVNetAssignmentScope = DynamicVNetAssignmentScope.None, + EndpointConfiguration = new BatchPoolEndpointConfiguration(new InboundNatPool[] + { + new InboundNatPool("", InboundEndpointProtocol.Tcp, 1234, 1234, 1234) + { + NetworkSecurityGroupRules = {new NetworkSecurityGroupRule(1234, NetworkSecurityGroupRuleAccess.Allow, "") + { + SourcePortRanges = {""}, + }}, + } + }), + PublicIpAddressConfiguration = new PublicIpAddressConfiguration + { + IpAddressProvisioningType = IpAddressProvisioningType.BatchManaged, + IpAddressIds = { "" }, + }, + EnableAcceleratedNetworking = true, + }, + StartTask = new BatchStartTask("") + { + ContainerSettings = new BatchTaskContainerSettings("") + { + ContainerRunOptions = "", + Registry = default, + WorkingDirectory = ContainerWorkingDirectory.TaskWorkingDirectory, + }, + ResourceFiles = {new ResourceFile + { + AutoStorageContainerName = "", + 
StorageContainerUrl = "", + HttpUrl = "", + BlobPrefix = "", + FilePath = "", + FileMode = "", + IdentityReference = default, + }}, + EnvironmentSettings = {new EnvironmentSetting("") + { + Value = "", + }}, + UserIdentity = new UserIdentity + { + Username = "", + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, + MaxTaskRetryCount = 1234, + WaitForSuccess = true, + }, + ApplicationPackageReferences = {new BatchApplicationPackageReference("") + { + Version = "", + }}, + UserAccounts = {new UserAccount("", "") + { + ElevationLevel = ElevationLevel.NonAdmin, + LinuxUserConfiguration = new LinuxUserConfiguration + { + Uid = 1234, + Gid = 1234, + SshPrivateKey = "", + }, + WindowsUserConfiguration = new WindowsUserConfiguration + { + LoginMode = LoginMode.Batch, + }, + }}, + Metadata = { new MetadataItem("", "") }, + MountConfiguration = {new MountConfiguration + { + AzureBlobFileSystemConfiguration = new AzureBlobFileSystemConfiguration("", "", "") + { + AccountKey = "", + SasKey = "", + BlobfuseOptions = "", + IdentityReference = default, + }, + NfsMountConfiguration = new NfsMountConfiguration("", "") + { + MountOptions = "", + }, + CifsMountConfiguration = new CifsMountConfiguration("", "", "", "") + { + MountOptions = "", + }, + AzureFileShareConfiguration = new AzureFileShareConfiguration("", "", "", "") + { + MountOptions = "", + }, + }}, + TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, + UpgradePolicy = new UpgradePolicy(UpgradeMode.Automatic) + { + AutomaticOsUpgradePolicy = new AutomaticOsUpgradePolicy + { + DisableAutomaticRollback = true, + EnableAutomaticOsUpgrade = true, + UseRollingUpgradePolicy = true, + OsRollingUpgradeDeferral = true, + }, + RollingUpgradePolicy = new RollingUpgradePolicy + { + EnableCrossZoneUpgrade = true, + MaxBatchInstancePercent = 1234, + MaxUnhealthyInstancePercent = 1234, + MaxUnhealthyUpgradedInstancePercent = 1234, + PauseTimeBetweenBatches = XmlConvert.ToTimeSpan("PT1H23M45S"), + PrioritizeUnhealthyInstances = true, + RollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, +}) +{ + Priority = 1234, + AllowTaskPreemption = true, + MaxParallelTasks = 1234, + Constraints = new BatchJobConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + OnAllTasksComplete = OnAllBatchTasksComplete.NoAction, + Metadata = { default }, +}; +Response response = await client.ReplaceJobAsync("", job, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); +]]> + + + +This sample shows how to call ReplaceJob. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchJob job = new BatchJob(new BatchPoolInfo()); +Response response = client.ReplaceJob("", job); +]]> +This sample shows how to call ReplaceJob with all parameters. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchJob job = new BatchJob(new BatchPoolInfo +{ + PoolId = "", + AutoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.JobSchedule) + { + AutoPoolIdPrefix = "", + KeepAlive = true, + Pool = new BatchPoolSpecification("") + { + DisplayName = "", + VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + { + Publisher = "", + Offer = "", + Sku = "", + Version = "", + VirtualMachineImageId = "", + }, "") + { + WindowsConfiguration = new WindowsConfiguration + { + EnableAutomaticUpdates = true, + }, + DataDisks = {new DataDisk(1234, 1234) + { + Caching = CachingType.None, + StorageAccountType = StorageAccountType.StandardLRS, + }}, + LicenseType = "", + ContainerConfiguration = new ContainerConfiguration(ContainerType.DockerCompatible) + { + ContainerImageNames = { "" }, + ContainerRegistries = {new ContainerRegistryReference + { + Username = "", + Password = "", + RegistryServer = "", + IdentityReference = new BatchNodeIdentityReference + { + ResourceId = "", + }, + }}, + }, + DiskEncryptionConfiguration = new DiskEncryptionConfiguration + { + Targets = { DiskEncryptionTarget.OsDisk }, + }, + NodePlacementConfiguration = new BatchNodePlacementConfiguration + { + Policy = BatchNodePlacementPolicyType.Regional, + }, + Extensions = {new VMExtension("", "", "") + { + TypeHandlerVersion = "", + AutoUpgradeMinorVersion = true, + EnableAutomaticUpgrade = true, + Settings = + { + ["key"] = "" + }, + ProtectedSettings = + { + ["key"] = "" + }, + ProvisionAfterExtensions = {""}, + }}, + OsDisk = new OSDisk + { + EphemeralOSDiskSettings = new DiffDiskSettings + { + Placement = DiffDiskPlacement.CacheDisk, + }, + Caching = CachingType.None, + DiskSizeGB = 1234, + ManagedDisk = new ManagedDisk(StorageAccountType.StandardLRS), + WriteAcceleratorEnabled = true, + }, + SecurityProfile = new SecurityProfile(true, SecurityTypes.TrustedLaunch, new UefiSettings + { + SecureBootEnabled = true, + VTpmEnabled = true, + }), + ServiceArtifactReference = new ServiceArtifactReference(""), + }, + TaskSlotsPerNode = 1234, + TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), + ResizeTimeout = XmlConvert.ToTimeSpan("PT1H23M45S"), + ResourceTags = "", + TargetDedicatedNodes = 1234, + TargetLowPriorityNodes = 1234, + EnableAutoScale = true, + AutoScaleFormula = "", + AutoScaleEvaluationInterval = XmlConvert.ToTimeSpan("PT1H23M45S"), + EnableInterNodeCommunication = true, + NetworkConfiguration = new NetworkConfiguration + { + SubnetId = "", + DynamicVNetAssignmentScope = DynamicVNetAssignmentScope.None, + EndpointConfiguration = new BatchPoolEndpointConfiguration(new InboundNatPool[] + { + new InboundNatPool("", InboundEndpointProtocol.Tcp, 1234, 1234, 1234) + { + NetworkSecurityGroupRules = {new NetworkSecurityGroupRule(1234, NetworkSecurityGroupRuleAccess.Allow, "") + { + SourcePortRanges = {""}, + }}, + } + }), + PublicIpAddressConfiguration = new PublicIpAddressConfiguration + { + IpAddressProvisioningType = IpAddressProvisioningType.BatchManaged, + IpAddressIds = { "" }, + }, + EnableAcceleratedNetworking = true, + }, + StartTask = new BatchStartTask("") + { + ContainerSettings = new BatchTaskContainerSettings("") + { + ContainerRunOptions = "", + Registry = default, + WorkingDirectory = ContainerWorkingDirectory.TaskWorkingDirectory, + }, + ResourceFiles = {new ResourceFile + { + AutoStorageContainerName = "", + 
StorageContainerUrl = "", + HttpUrl = "", + BlobPrefix = "", + FilePath = "", + FileMode = "", + IdentityReference = default, + }}, + EnvironmentSettings = {new EnvironmentSetting("") + { + Value = "", + }}, + UserIdentity = new UserIdentity + { + Username = "", + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, + MaxTaskRetryCount = 1234, + WaitForSuccess = true, + }, + ApplicationPackageReferences = {new BatchApplicationPackageReference("") + { + Version = "", + }}, + UserAccounts = {new UserAccount("", "") + { + ElevationLevel = ElevationLevel.NonAdmin, + LinuxUserConfiguration = new LinuxUserConfiguration + { + Uid = 1234, + Gid = 1234, + SshPrivateKey = "", + }, + WindowsUserConfiguration = new WindowsUserConfiguration + { + LoginMode = LoginMode.Batch, + }, + }}, + Metadata = { new MetadataItem("", "") }, + MountConfiguration = {new MountConfiguration + { + AzureBlobFileSystemConfiguration = new AzureBlobFileSystemConfiguration("", "", "") + { + AccountKey = "", + SasKey = "", + BlobfuseOptions = "", + IdentityReference = default, + }, + NfsMountConfiguration = new NfsMountConfiguration("", "") + { + MountOptions = "", + }, + CifsMountConfiguration = new CifsMountConfiguration("", "", "", "") + { + MountOptions = "", + }, + AzureFileShareConfiguration = new AzureFileShareConfiguration("", "", "", "") + { + MountOptions = "", + }, + }}, + TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, + UpgradePolicy = new UpgradePolicy(UpgradeMode.Automatic) + { + AutomaticOsUpgradePolicy = new AutomaticOsUpgradePolicy + { + DisableAutomaticRollback = true, + EnableAutomaticOsUpgrade = true, + UseRollingUpgradePolicy = true, + OsRollingUpgradeDeferral = true, + }, + RollingUpgradePolicy = new RollingUpgradePolicy + { + EnableCrossZoneUpgrade = true, + MaxBatchInstancePercent = 1234, + MaxUnhealthyInstancePercent = 1234, + MaxUnhealthyUpgradedInstancePercent = 1234, + PauseTimeBetweenBatches = XmlConvert.ToTimeSpan("PT1H23M45S"), + PrioritizeUnhealthyInstances = true, + RollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, +}) +{ + Priority = 1234, + AllowTaskPreemption = true, + MaxParallelTasks = 1234, + Constraints = new BatchJobConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + OnAllTasksComplete = OnAllBatchTasksComplete.NoAction, + Metadata = { default }, +}; +Response response = client.ReplaceJob("", job, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); +]]> + + + +This sample shows how to call ReplaceJobAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + poolInfo = new object(), +}); +Response response = await client.ReplaceJobAsync("", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call ReplaceJobAsync with all parameters and request content. 
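+Because this overload takes raw RequestContent, the payload below uses the REST wire names (camelCase) rather than the .NET model property names.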
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + priority = 1234, + allowTaskPreemption = true, + maxParallelTasks = 1234, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + poolInfo = new + { + poolId = "", + autoPoolSpecification = new + { + autoPoolIdPrefix = "", + poolLifetimeOption = "jobschedule", + keepAlive = true, + pool = new + { + displayName = "", + vmSize = "", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "", + offer = "", + sku = "", + version = "", + virtualMachineImageId = "", + }, + nodeAgentSKUId = "", + windowsConfiguration = new + { + enableAutomaticUpdates = true, + }, + dataDisks = new object[] + { + new + { + lun = 1234, + caching = "none", + diskSizeGB = 1234, + storageAccountType = "standard_lrs", + } + }, + licenseType = "", + containerConfiguration = new + { + type = "dockerCompatible", + containerImageNames = new object[] + { + "" + }, + containerRegistries = new object[] + { + new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + } + }, + }, + diskEncryptionConfiguration = new + { + targets = new object[] + { + "osdisk" + }, + }, + nodePlacementConfiguration = new + { + policy = "regional", + }, + extensions = new object[] + { + new + { + name = "", + publisher = "", + type = "", + typeHandlerVersion = "", + autoUpgradeMinorVersion = true, + enableAutomaticUpgrade = true, + settings = new + { + key = "", + }, + protectedSettings = new + { + key = "", + }, + provisionAfterExtensions = new object[] + { + "" + }, + } + }, + osDisk = new + { + ephemeralOSDiskSettings = new + { + placement = "cachedisk", + }, + caching = "none", + diskSizeGB = 1234, + managedDisk = new + { + storageAccountType = "standard_lrs", + }, + writeAcceleratorEnabled = true, + }, + securityProfile = new + { + encryptionAtHost = true, + securityType = "trustedLaunch", + uefiSettings = new + { + secureBootEnabled = true, + vTpmEnabled = true, + }, + }, + serviceArtifactReference = new + { + id = "", + }, + }, + taskSlotsPerNode = 1234, + taskSchedulingPolicy = new + { + nodeFillType = "spread", + }, + resizeTimeout = "PT1H23M45S", + resourceTags = "", + targetDedicatedNodes = 1234, + targetLowPriorityNodes = 1234, + enableAutoScale = true, + autoScaleFormula = "", + autoScaleEvaluationInterval = "PT1H23M45S", + enableInterNodeCommunication = true, + networkConfiguration = new + { + subnetId = "", + dynamicVNetAssignmentScope = "none", + endpointConfiguration = new + { + inboundNATPools = new object[] + { + new + { + name = "", + protocol = "tcp", + backendPort = 1234, + frontendPortRangeStart = 1234, + frontendPortRangeEnd = 1234, + networkSecurityGroupRules = new object[] + { + new + { + priority = 1234, + access = "allow", + sourceAddressPrefix = "", + sourcePortRanges = new object[] + { + "" + }, + } + }, + } + }, + }, + publicIPAddressConfiguration = new + { + provision = "batchmanaged", + ipAddressIds = new object[] + { + "" + }, + }, + enableAcceleratedNetworking = true, + }, + startTask = new + { + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { + new + { + autoStorageContainerName = "", + storageContainerUrl = "", + httpUrl = "", + blobPrefix = "", + filePath = "", + 
fileMode = "", + } + }, + environmentSettings = new object[] + { + new + { + name = "", + value = "", + } + }, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { + new + { + applicationId = "", + version = "", + } + }, + userAccounts = new object[] + { + new + { + name = "", + password = "", + elevationLevel = "nonadmin", + linuxUserConfiguration = new + { + uid = 1234, + gid = 1234, + sshPrivateKey = "", + }, + windowsUserConfiguration = new + { + loginMode = "batch", + }, + } + }, + metadata = new object[] + { + new + { + name = "", + value = "", + } + }, + mountConfiguration = new object[] + { + new + { + azureBlobFileSystemConfiguration = new + { + accountName = "", + containerName = "", + accountKey = "", + sasKey = "", + blobfuseOptions = "", + relativeMountPath = "", + }, + nfsMountConfiguration = new + { + source = "", + relativeMountPath = "", + mountOptions = "", + }, + cifsMountConfiguration = new + { + username = "", + source = "", + relativeMountPath = "", + mountOptions = "", + password = "", + }, + azureFileShareConfiguration = new + { + accountName = "", + azureFileUrl = "", + accountKey = "", + relativeMountPath = "", + mountOptions = "", + }, + } + }, + targetNodeCommunicationMode = "default", + upgradePolicy = new + { + mode = "automatic", + automaticOSUpgradePolicy = new + { + disableAutomaticRollback = true, + enableAutomaticOSUpgrade = true, + useRollingUpgradePolicy = true, + osRollingUpgradeDeferral = true, + }, + rollingUpgradePolicy = new + { + enableCrossZoneUpgrade = true, + maxBatchInstancePercent = 1234, + maxUnhealthyInstancePercent = 1234, + maxUnhealthyUpgradedInstancePercent = 1234, + pauseTimeBetweenBatches = "PT1H23M45S", + prioritizeUnhealthyInstances = true, + rollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, + }, + onAllTasksComplete = "noaction", + metadata = new object[] + { + null + }, +}); +Response response = await client.ReplaceJobAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call ReplaceJob. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + poolInfo = new object(), +}); +Response response = client.ReplaceJob("", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call ReplaceJob with all parameters and request content. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + priority = 1234, + allowTaskPreemption = true, + maxParallelTasks = 1234, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + poolInfo = new + { + poolId = "", + autoPoolSpecification = new + { + autoPoolIdPrefix = "", + poolLifetimeOption = "jobschedule", + keepAlive = true, + pool = new + { + displayName = "", + vmSize = "", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "", + offer = "", + sku = "", + version = "", + virtualMachineImageId = "", + }, + nodeAgentSKUId = "", + windowsConfiguration = new + { + enableAutomaticUpdates = true, + }, + dataDisks = new object[] + { + new + { + lun = 1234, + caching = "none", + diskSizeGB = 1234, + storageAccountType = "standard_lrs", + } + }, + licenseType = "", + containerConfiguration = new + { + type = "dockerCompatible", + containerImageNames = new object[] + { + "" + }, + containerRegistries = new object[] + { + new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + } + }, + }, + diskEncryptionConfiguration = new + { + targets = new object[] + { + "osdisk" + }, + }, + nodePlacementConfiguration = new + { + policy = "regional", + }, + extensions = new object[] + { + new + { + name = "", + publisher = "", + type = "", + typeHandlerVersion = "", + autoUpgradeMinorVersion = true, + enableAutomaticUpgrade = true, + settings = new + { + key = "", + }, + protectedSettings = new + { + key = "", + }, + provisionAfterExtensions = new object[] + { + "" + }, + } + }, + osDisk = new + { + ephemeralOSDiskSettings = new + { + placement = "cachedisk", + }, + caching = "none", + diskSizeGB = 1234, + managedDisk = new + { + storageAccountType = "standard_lrs", + }, + writeAcceleratorEnabled = true, + }, + securityProfile = new + { + encryptionAtHost = true, + securityType = "trustedLaunch", + uefiSettings = new + { + secureBootEnabled = true, + vTpmEnabled = true, + }, + }, + serviceArtifactReference = new + { + id = "", + }, + }, + taskSlotsPerNode = 1234, + taskSchedulingPolicy = new + { + nodeFillType = "spread", + }, + resizeTimeout = "PT1H23M45S", + resourceTags = "", + targetDedicatedNodes = 1234, + targetLowPriorityNodes = 1234, + enableAutoScale = true, + autoScaleFormula = "", + autoScaleEvaluationInterval = "PT1H23M45S", + enableInterNodeCommunication = true, + networkConfiguration = new + { + subnetId = "", + dynamicVNetAssignmentScope = "none", + endpointConfiguration = new + { + inboundNATPools = new object[] + { + new + { + name = "", + protocol = "tcp", + backendPort = 1234, + frontendPortRangeStart = 1234, + frontendPortRangeEnd = 1234, + networkSecurityGroupRules = new object[] + { + new + { + priority = 1234, + access = "allow", + sourceAddressPrefix = "", + sourcePortRanges = new object[] + { + "" + }, + } + }, + } + }, + }, + publicIPAddressConfiguration = new + { + provision = "batchmanaged", + ipAddressIds = new object[] + { + "" + }, + }, + enableAcceleratedNetworking = true, + }, + startTask = new + { + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { + new + { + autoStorageContainerName = "", + storageContainerUrl = "", + httpUrl = "", + blobPrefix = "", + filePath = "", + 
fileMode = "", + } + }, + environmentSettings = new object[] + { + new + { + name = "", + value = "", + } + }, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { + new + { + applicationId = "", + version = "", + } + }, + userAccounts = new object[] + { + new + { + name = "", + password = "", + elevationLevel = "nonadmin", + linuxUserConfiguration = new + { + uid = 1234, + gid = 1234, + sshPrivateKey = "", + }, + windowsUserConfiguration = new + { + loginMode = "batch", + }, + } + }, + metadata = new object[] + { + new + { + name = "", + value = "", + } + }, + mountConfiguration = new object[] + { + new + { + azureBlobFileSystemConfiguration = new + { + accountName = "", + containerName = "", + accountKey = "", + sasKey = "", + blobfuseOptions = "", + relativeMountPath = "", + }, + nfsMountConfiguration = new + { + source = "", + relativeMountPath = "", + mountOptions = "", + }, + cifsMountConfiguration = new + { + username = "", + source = "", + relativeMountPath = "", + mountOptions = "", + password = "", + }, + azureFileShareConfiguration = new + { + accountName = "", + azureFileUrl = "", + accountKey = "", + relativeMountPath = "", + mountOptions = "", + }, + } + }, + targetNodeCommunicationMode = "default", + upgradePolicy = new + { + mode = "automatic", + automaticOSUpgradePolicy = new + { + disableAutomaticRollback = true, + enableAutomaticOSUpgrade = true, + useRollingUpgradePolicy = true, + osRollingUpgradeDeferral = true, + }, + rollingUpgradePolicy = new + { + enableCrossZoneUpgrade = true, + maxBatchInstancePercent = 1234, + maxUnhealthyInstancePercent = 1234, + maxUnhealthyUpgradedInstancePercent = 1234, + pauseTimeBetweenBatches = "PT1H23M45S", + prioritizeUnhealthyInstances = true, + rollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, + }, + onAllTasksComplete = "noaction", + metadata = new object[] + { + null + }, +}); +Response response = client.ReplaceJob("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call DisableJobAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchJobDisableContent content = new BatchJobDisableContent(DisableBatchJobOption.Requeue); +Response response = await client.DisableJobAsync("", content); +]]> +This sample shows how to call DisableJobAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchJobDisableContent content = new BatchJobDisableContent(DisableBatchJobOption.Requeue); +Response response = await client.DisableJobAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); +]]> + + + +This sample shows how to call DisableJob. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchJobDisableContent content = new BatchJobDisableContent(DisableBatchJobOption.Requeue); +Response response = client.DisableJob("", content); +]]> +This sample shows how to call DisableJob with all parameters. 
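+The requeue option shown here requeues running tasks; Terminate and Wait are the other values DisableBatchJobOption accepts.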
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchJobDisableContent content = new BatchJobDisableContent(DisableBatchJobOption.Requeue); +Response response = client.DisableJob("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); +]]> + + + +This sample shows how to call DisableJobAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + disableTasks = "requeue", +}); +Response response = await client.DisableJobAsync("", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call DisableJobAsync with all parameters and request content. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + disableTasks = "requeue", +}); +Response response = await client.DisableJobAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call DisableJob. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + disableTasks = "requeue", +}); +Response response = client.DisableJob("", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call DisableJob with all parameters and request content. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + disableTasks = "requeue", +}); +Response response = client.DisableJob("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call EnableJobAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.EnableJobAsync(""); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call EnableJobAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.EnableJobAsync("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call EnableJob. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.EnableJob(""); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call EnableJob with all parameters. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.EnableJob("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call TerminateJobAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.TerminateJobAsync(""); +]]> +This sample shows how to call TerminateJobAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchJobTerminateContent parameters = new BatchJobTerminateContent +{ + TerminationReason = "", +}; +Response response = await client.TerminateJobAsync("", parameters: parameters, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); +]]> + + + +This sample shows how to call TerminateJob. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.TerminateJob(""); +]]> +This sample shows how to call TerminateJob with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchJobTerminateContent parameters = new BatchJobTerminateContent +{ + TerminationReason = "", +}; +Response response = client.TerminateJob("", parameters: parameters, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); +]]> + + + +This sample shows how to call TerminateJobAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = null; +Response response = await client.TerminateJobAsync("", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call TerminateJobAsync with all parameters and request content. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + terminateReason = "", +}); +Response response = await client.TerminateJobAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call TerminateJob. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = null; +Response response = client.TerminateJob("", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call TerminateJob with all parameters and request content. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + terminateReason = "", +}); +Response response = client.TerminateJob("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call CreateJobAsync. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchJobCreateContent job = new BatchJobCreateContent("", new BatchPoolInfo()); +Response response = await client.CreateJobAsync(job); +]]> +This sample shows how to call CreateJobAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchJobCreateContent job = new BatchJobCreateContent("", new BatchPoolInfo +{ + PoolId = "", + AutoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.JobSchedule) + { + AutoPoolIdPrefix = "", + KeepAlive = true, + Pool = new BatchPoolSpecification("") + { + DisplayName = "", + VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + { + Publisher = "", + Offer = "", + Sku = "", + Version = "", + VirtualMachineImageId = "", + }, "") + { + WindowsConfiguration = new WindowsConfiguration + { + EnableAutomaticUpdates = true, + }, + DataDisks = {new DataDisk(1234, 1234) + { + Caching = CachingType.None, + StorageAccountType = StorageAccountType.StandardLRS, + }}, + LicenseType = "", + ContainerConfiguration = new ContainerConfiguration(ContainerType.DockerCompatible) + { + ContainerImageNames = { "" }, + ContainerRegistries = { default }, + }, + DiskEncryptionConfiguration = new DiskEncryptionConfiguration + { + Targets = { DiskEncryptionTarget.OsDisk }, + }, + NodePlacementConfiguration = new BatchNodePlacementConfiguration + { + Policy = BatchNodePlacementPolicyType.Regional, + }, + Extensions = {new VMExtension("", "", "") + { + TypeHandlerVersion = "", + AutoUpgradeMinorVersion = true, + EnableAutomaticUpgrade = true, + Settings = + { + ["key"] = "" + }, + ProtectedSettings = + { + ["key"] = "" + }, + ProvisionAfterExtensions = {""}, + }}, + OsDisk = new OSDisk + { + EphemeralOSDiskSettings = new DiffDiskSettings + { + Placement = DiffDiskPlacement.CacheDisk, + }, + Caching = CachingType.None, + DiskSizeGB = 1234, + ManagedDisk = new ManagedDisk(StorageAccountType.StandardLRS), + WriteAcceleratorEnabled = true, + }, + SecurityProfile = new SecurityProfile(true, SecurityTypes.TrustedLaunch, new UefiSettings + { + SecureBootEnabled = true, + VTpmEnabled = true, + }), + ServiceArtifactReference = new ServiceArtifactReference(""), + }, + TaskSlotsPerNode = 1234, + TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), + ResizeTimeout = XmlConvert.ToTimeSpan("PT1H23M45S"), + ResourceTags = "", + TargetDedicatedNodes = 1234, + TargetLowPriorityNodes = 1234, + EnableAutoScale = true, + AutoScaleFormula = "", + AutoScaleEvaluationInterval = XmlConvert.ToTimeSpan("PT1H23M45S"), + EnableInterNodeCommunication = true, + NetworkConfiguration = new NetworkConfiguration + { + SubnetId = "", + DynamicVNetAssignmentScope = DynamicVNetAssignmentScope.None, + EndpointConfiguration = new BatchPoolEndpointConfiguration(new InboundNatPool[] + { + new InboundNatPool("", InboundEndpointProtocol.Tcp, 1234, 1234, 1234) + { + NetworkSecurityGroupRules = {new NetworkSecurityGroupRule(1234, NetworkSecurityGroupRuleAccess.Allow, "") + { + SourcePortRanges = {""}, + }}, + } + }), + PublicIpAddressConfiguration = new PublicIpAddressConfiguration + { + IpAddressProvisioningType = IpAddressProvisioningType.BatchManaged, + IpAddressIds = { "" }, + }, + EnableAcceleratedNetworking = true, + }, + StartTask = new BatchStartTask("") + { + ContainerSettings = default, + ResourceFiles = { default }, 
+ EnvironmentSettings = { default }, + UserIdentity = default, + MaxTaskRetryCount = 1234, + WaitForSuccess = true, + }, + ApplicationPackageReferences = { default }, + UserAccounts = {new UserAccount("", "") + { + ElevationLevel = ElevationLevel.NonAdmin, + LinuxUserConfiguration = new LinuxUserConfiguration + { + Uid = 1234, + Gid = 1234, + SshPrivateKey = "", + }, + WindowsUserConfiguration = new WindowsUserConfiguration + { + LoginMode = LoginMode.Batch, + }, + }}, + Metadata = { new MetadataItem("", "") }, + MountConfiguration = {new MountConfiguration + { + AzureBlobFileSystemConfiguration = new AzureBlobFileSystemConfiguration("", "", "") + { + AccountKey = "", + SasKey = "", + BlobfuseOptions = "", + IdentityReference = default, + }, + NfsMountConfiguration = new NfsMountConfiguration("", "") + { + MountOptions = "", + }, + CifsMountConfiguration = new CifsMountConfiguration("", "", "", "") + { + MountOptions = "", + }, + AzureFileShareConfiguration = new AzureFileShareConfiguration("", "", "", "") + { + MountOptions = "", + }, + }}, + TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, + UpgradePolicy = new UpgradePolicy(UpgradeMode.Automatic) + { + AutomaticOsUpgradePolicy = new AutomaticOsUpgradePolicy + { + DisableAutomaticRollback = true, + EnableAutomaticOsUpgrade = true, + UseRollingUpgradePolicy = true, + OsRollingUpgradeDeferral = true, + }, + RollingUpgradePolicy = new RollingUpgradePolicy + { + EnableCrossZoneUpgrade = true, + MaxBatchInstancePercent = 1234, + MaxUnhealthyInstancePercent = 1234, + MaxUnhealthyUpgradedInstancePercent = 1234, + PauseTimeBetweenBatches = XmlConvert.ToTimeSpan("PT1H23M45S"), + PrioritizeUnhealthyInstances = true, + RollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, +}) +{ + DisplayName = "", + UsesTaskDependencies = true, + Priority = 1234, + AllowTaskPreemption = true, + MaxParallelTasks = 1234, + Constraints = new BatchJobConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + JobManagerTask = new BatchJobManagerTask("", "") + { + DisplayName = "", + ContainerSettings = new BatchTaskContainerSettings("") + { + ContainerRunOptions = "", + Registry = new ContainerRegistryReference + { + Username = "", + Password = "", + RegistryServer = "", + IdentityReference = new BatchNodeIdentityReference + { + ResourceId = "", + }, + }, + WorkingDirectory = ContainerWorkingDirectory.TaskWorkingDirectory, + }, + ResourceFiles = {new ResourceFile + { + AutoStorageContainerName = "", + StorageContainerUrl = "", + HttpUrl = "", + BlobPrefix = "", + FilePath = "", + FileMode = "", + IdentityReference = default, + }}, + OutputFiles = {new OutputFile("", new OutputFileDestination + { + Container = new OutputFileBlobContainerDestination("") + { + Path = "", + IdentityReference = default, + UploadHeaders = {new HttpHeader("") + { + Value = "", + }}, + }, + }, new OutputFileUploadConfig(OutputFileUploadCondition.TaskSuccess))}, + EnvironmentSettings = {new EnvironmentSetting("") + { + Value = "", + }}, + Constraints = new BatchTaskConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + RequiredSlots = 1234, + KillJobOnCompletion = true, + UserIdentity = new UserIdentity + { + Username = "", + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, + RunExclusive = true, + ApplicationPackageReferences = {new 
BatchApplicationPackageReference("") + { + Version = "", + }}, + AuthenticationTokenSettings = new AuthenticationTokenSettings + { + Access = { AccessScope.Job }, + }, + AllowLowPriorityNode = true, + }, + JobPreparationTask = new BatchJobPreparationTask("") + { + Id = "", + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + Constraints = default, + WaitForSuccess = true, + UserIdentity = default, + RerunOnNodeRebootAfterSuccess = true, + }, + JobReleaseTask = new BatchJobReleaseTask("") + { + Id = "", + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + UserIdentity = default, + }, + CommonEnvironmentSettings = { default }, + OnAllTasksComplete = OnAllBatchTasksComplete.NoAction, + OnTaskFailure = OnBatchTaskFailure.NoAction, + NetworkConfiguration = new BatchJobNetworkConfiguration(""), + Metadata = { default }, +}; +Response response = await client.CreateJobAsync(job, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); +]]> + + + +This sample shows how to call CreateJob. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchJobCreateContent job = new BatchJobCreateContent("", new BatchPoolInfo()); +Response response = client.CreateJob(job); +]]> +This sample shows how to call CreateJob with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchJobCreateContent job = new BatchJobCreateContent("", new BatchPoolInfo +{ + PoolId = "", + AutoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.JobSchedule) + { + AutoPoolIdPrefix = "", + KeepAlive = true, + Pool = new BatchPoolSpecification("") + { + DisplayName = "", + VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + { + Publisher = "", + Offer = "", + Sku = "", + Version = "", + VirtualMachineImageId = "", + }, "") + { + WindowsConfiguration = new WindowsConfiguration + { + EnableAutomaticUpdates = true, + }, + DataDisks = {new DataDisk(1234, 1234) + { + Caching = CachingType.None, + StorageAccountType = StorageAccountType.StandardLRS, + }}, + LicenseType = "", + ContainerConfiguration = new ContainerConfiguration(ContainerType.DockerCompatible) + { + ContainerImageNames = { "" }, + ContainerRegistries = { default }, + }, + DiskEncryptionConfiguration = new DiskEncryptionConfiguration + { + Targets = { DiskEncryptionTarget.OsDisk }, + }, + NodePlacementConfiguration = new BatchNodePlacementConfiguration + { + Policy = BatchNodePlacementPolicyType.Regional, + }, + Extensions = {new VMExtension("", "", "") + { + TypeHandlerVersion = "", + AutoUpgradeMinorVersion = true, + EnableAutomaticUpgrade = true, + Settings = + { + ["key"] = "" + }, + ProtectedSettings = + { + ["key"] = "" + }, + ProvisionAfterExtensions = {""}, + }}, + OsDisk = new OSDisk + { + EphemeralOSDiskSettings = new DiffDiskSettings + { + Placement = DiffDiskPlacement.CacheDisk, + }, + Caching = CachingType.None, + DiskSizeGB = 1234, + ManagedDisk = new ManagedDisk(StorageAccountType.StandardLRS), + WriteAcceleratorEnabled = true, + }, + SecurityProfile = new SecurityProfile(true, SecurityTypes.TrustedLaunch, new UefiSettings + { + SecureBootEnabled = true, + VTpmEnabled = true, + }), + 
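+                // TrustedLaunch requires a generation 2 VM image; secure boot and the virtual TPM are toggled through the UefiSettings above.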
ServiceArtifactReference = new ServiceArtifactReference(""), + }, + TaskSlotsPerNode = 1234, + TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), + ResizeTimeout = XmlConvert.ToTimeSpan("PT1H23M45S"), + ResourceTags = "", + TargetDedicatedNodes = 1234, + TargetLowPriorityNodes = 1234, + EnableAutoScale = true, + AutoScaleFormula = "", + AutoScaleEvaluationInterval = XmlConvert.ToTimeSpan("PT1H23M45S"), + EnableInterNodeCommunication = true, + NetworkConfiguration = new NetworkConfiguration + { + SubnetId = "", + DynamicVNetAssignmentScope = DynamicVNetAssignmentScope.None, + EndpointConfiguration = new BatchPoolEndpointConfiguration(new InboundNatPool[] + { + new InboundNatPool("", InboundEndpointProtocol.Tcp, 1234, 1234, 1234) + { + NetworkSecurityGroupRules = {new NetworkSecurityGroupRule(1234, NetworkSecurityGroupRuleAccess.Allow, "") + { + SourcePortRanges = {""}, + }}, + } + }), + PublicIpAddressConfiguration = new PublicIpAddressConfiguration + { + IpAddressProvisioningType = IpAddressProvisioningType.BatchManaged, + IpAddressIds = { "" }, + }, + EnableAcceleratedNetworking = true, + }, + StartTask = new BatchStartTask("") + { + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + UserIdentity = default, + MaxTaskRetryCount = 1234, + WaitForSuccess = true, + }, + ApplicationPackageReferences = { default }, + UserAccounts = {new UserAccount("", "") + { + ElevationLevel = ElevationLevel.NonAdmin, + LinuxUserConfiguration = new LinuxUserConfiguration + { + Uid = 1234, + Gid = 1234, + SshPrivateKey = "", + }, + WindowsUserConfiguration = new WindowsUserConfiguration + { + LoginMode = LoginMode.Batch, + }, + }}, + Metadata = { new MetadataItem("", "") }, + MountConfiguration = {new MountConfiguration + { + AzureBlobFileSystemConfiguration = new AzureBlobFileSystemConfiguration("", "", "") + { + AccountKey = "", + SasKey = "", + BlobfuseOptions = "", + IdentityReference = default, + }, + NfsMountConfiguration = new NfsMountConfiguration("", "") + { + MountOptions = "", + }, + CifsMountConfiguration = new CifsMountConfiguration("", "", "", "") + { + MountOptions = "", + }, + AzureFileShareConfiguration = new AzureFileShareConfiguration("", "", "", "") + { + MountOptions = "", + }, + }}, + TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, + UpgradePolicy = new UpgradePolicy(UpgradeMode.Automatic) + { + AutomaticOsUpgradePolicy = new AutomaticOsUpgradePolicy + { + DisableAutomaticRollback = true, + EnableAutomaticOsUpgrade = true, + UseRollingUpgradePolicy = true, + OsRollingUpgradeDeferral = true, + }, + RollingUpgradePolicy = new RollingUpgradePolicy + { + EnableCrossZoneUpgrade = true, + MaxBatchInstancePercent = 1234, + MaxUnhealthyInstancePercent = 1234, + MaxUnhealthyUpgradedInstancePercent = 1234, + PauseTimeBetweenBatches = XmlConvert.ToTimeSpan("PT1H23M45S"), + PrioritizeUnhealthyInstances = true, + RollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, +}) +{ + DisplayName = "", + UsesTaskDependencies = true, + Priority = 1234, + AllowTaskPreemption = true, + MaxParallelTasks = 1234, + Constraints = new BatchJobConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + JobManagerTask = new BatchJobManagerTask("", "") + { + DisplayName = "", + ContainerSettings = new BatchTaskContainerSettings("") + { + ContainerRunOptions = "", + Registry = new ContainerRegistryReference + { + Username = "", + Password = "", + RegistryServer = "", + 
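+                // Username/password and a managed-identity reference are alternative ways to authenticate to the registry; a real call uses one or the other.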
IdentityReference = new BatchNodeIdentityReference + { + ResourceId = "", + }, + }, + WorkingDirectory = ContainerWorkingDirectory.TaskWorkingDirectory, + }, + ResourceFiles = {new ResourceFile + { + AutoStorageContainerName = "", + StorageContainerUrl = "", + HttpUrl = "", + BlobPrefix = "", + FilePath = "", + FileMode = "", + IdentityReference = default, + }}, + OutputFiles = {new OutputFile("", new OutputFileDestination + { + Container = new OutputFileBlobContainerDestination("") + { + Path = "", + IdentityReference = default, + UploadHeaders = {new HttpHeader("") + { + Value = "", + }}, + }, + }, new OutputFileUploadConfig(OutputFileUploadCondition.TaskSuccess))}, + EnvironmentSettings = {new EnvironmentSetting("") + { + Value = "", + }}, + Constraints = new BatchTaskConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + RequiredSlots = 1234, + KillJobOnCompletion = true, + UserIdentity = new UserIdentity + { + Username = "", + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, + RunExclusive = true, + ApplicationPackageReferences = {new BatchApplicationPackageReference("") + { + Version = "", + }}, + AuthenticationTokenSettings = new AuthenticationTokenSettings + { + Access = { AccessScope.Job }, + }, + AllowLowPriorityNode = true, + }, + JobPreparationTask = new BatchJobPreparationTask("") + { + Id = "", + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + Constraints = default, + WaitForSuccess = true, + UserIdentity = default, + RerunOnNodeRebootAfterSuccess = true, + }, + JobReleaseTask = new BatchJobReleaseTask("") + { + Id = "", + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + UserIdentity = default, + }, + CommonEnvironmentSettings = { default }, + OnAllTasksComplete = OnAllBatchTasksComplete.NoAction, + OnTaskFailure = OnBatchTaskFailure.NoAction, + NetworkConfiguration = new BatchJobNetworkConfiguration(""), + Metadata = { default }, +}; +Response response = client.CreateJob(job, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); +]]> + + + +This sample shows how to call CreateJobAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + id = "", + poolInfo = new object(), +}); +Response response = await client.CreateJobAsync(content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call CreateJobAsync with all parameters and request content. 
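+The payload again uses REST wire names; unlike the ReplaceJob body above, a create request can also carry create-only properties such as jobManagerTask, jobPreparationTask, and jobReleaseTask.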
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + id = "", + displayName = "", + usesTaskDependencies = true, + priority = 1234, + allowTaskPreemption = true, + maxParallelTasks = 1234, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + jobManagerTask = new + { + id = "", + displayName = "", + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + registry = new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + }, + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { + new + { + autoStorageContainerName = "", + storageContainerUrl = "", + httpUrl = "", + blobPrefix = "", + filePath = "", + fileMode = "", + } + }, + outputFiles = new object[] + { + new + { + filePattern = "", + destination = new + { + container = new + { + path = "", + containerUrl = "", + uploadHeaders = new object[] + { + new + { + name = "", + value = "", + } + }, + }, + }, + uploadOptions = new + { + uploadCondition = "tasksuccess", + }, + } + }, + environmentSettings = new object[] + { + new + { + name = "", + value = "", + } + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + requiredSlots = 1234, + killJobOnCompletion = true, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + runExclusive = true, + applicationPackageReferences = new object[] + { + new + { + applicationId = "", + version = "", + } + }, + authenticationTokenSettings = new + { + access = new object[] + { + "job" + }, + }, + allowLowPriorityNode = true, + }, + jobPreparationTask = new + { + id = "", + commandLine = "", + resourceFiles = new object[] + { + null + }, + environmentSettings = new object[] + { + null + }, + waitForSuccess = true, + rerunOnNodeRebootAfterSuccess = true, + }, + jobReleaseTask = new + { + id = "", + commandLine = "", + resourceFiles = new object[] + { + null + }, + environmentSettings = new object[] + { + null + }, + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + }, + commonEnvironmentSettings = new object[] + { + null + }, + poolInfo = new + { + poolId = "", + autoPoolSpecification = new + { + autoPoolIdPrefix = "", + poolLifetimeOption = "jobschedule", + keepAlive = true, + pool = new + { + displayName = "", + vmSize = "", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "", + offer = "", + sku = "", + version = "", + virtualMachineImageId = "", + }, + nodeAgentSKUId = "", + windowsConfiguration = new + { + enableAutomaticUpdates = true, + }, + dataDisks = new object[] + { + new + { + lun = 1234, + caching = "none", + diskSizeGB = 1234, + storageAccountType = "standard_lrs", + } + }, + licenseType = "", + containerConfiguration = new + { + type = "dockerCompatible", + containerImageNames = new object[] + { + "" + }, + containerRegistries = new object[] + { + null + }, + }, + diskEncryptionConfiguration = new + { + targets = new object[] + { + "osdisk" + }, + }, + nodePlacementConfiguration = new + { + policy = "regional", + }, + extensions = new object[] + { + new + { + name = "", + publisher = "", + type = "", + typeHandlerVersion = "", + autoUpgradeMinorVersion = true, + enableAutomaticUpgrade = true, + 
settings = new + { + key = "", + }, + protectedSettings = new + { + key = "", + }, + provisionAfterExtensions = new object[] + { + "" + }, + } + }, + osDisk = new + { + ephemeralOSDiskSettings = new + { + placement = "cachedisk", + }, + caching = "none", + diskSizeGB = 1234, + managedDisk = new + { + storageAccountType = "standard_lrs", + }, + writeAcceleratorEnabled = true, + }, + securityProfile = new + { + encryptionAtHost = true, + securityType = "trustedLaunch", + uefiSettings = new + { + secureBootEnabled = true, + vTpmEnabled = true, + }, + }, + serviceArtifactReference = new + { + id = "", + }, + }, + taskSlotsPerNode = 1234, + taskSchedulingPolicy = new + { + nodeFillType = "spread", + }, + resizeTimeout = "PT1H23M45S", + resourceTags = "", + targetDedicatedNodes = 1234, + targetLowPriorityNodes = 1234, + enableAutoScale = true, + autoScaleFormula = "", + autoScaleEvaluationInterval = "PT1H23M45S", + enableInterNodeCommunication = true, + networkConfiguration = new + { + subnetId = "", + dynamicVNetAssignmentScope = "none", + endpointConfiguration = new + { + inboundNATPools = new object[] + { + new + { + name = "", + protocol = "tcp", + backendPort = 1234, + frontendPortRangeStart = 1234, + frontendPortRangeEnd = 1234, + networkSecurityGroupRules = new object[] + { + new + { + priority = 1234, + access = "allow", + sourceAddressPrefix = "", + sourcePortRanges = new object[] + { + "" + }, + } + }, + } + }, + }, + publicIPAddressConfiguration = new + { + provision = "batchmanaged", + ipAddressIds = new object[] + { + "" + }, + }, + enableAcceleratedNetworking = true, + }, + startTask = new + { + commandLine = "", + resourceFiles = new object[] + { + null + }, + environmentSettings = new object[] + { + null + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { + null + }, + userAccounts = new object[] + { + new + { + name = "", + password = "", + elevationLevel = "nonadmin", + linuxUserConfiguration = new + { + uid = 1234, + gid = 1234, + sshPrivateKey = "", + }, + windowsUserConfiguration = new + { + loginMode = "batch", + }, + } + }, + metadata = new object[] + { + new + { + name = "", + value = "", + } + }, + mountConfiguration = new object[] + { + new + { + azureBlobFileSystemConfiguration = new + { + accountName = "", + containerName = "", + accountKey = "", + sasKey = "", + blobfuseOptions = "", + relativeMountPath = "", + }, + nfsMountConfiguration = new + { + source = "", + relativeMountPath = "", + mountOptions = "", + }, + cifsMountConfiguration = new + { + username = "", + source = "", + relativeMountPath = "", + mountOptions = "", + password = "", + }, + azureFileShareConfiguration = new + { + accountName = "", + azureFileUrl = "", + accountKey = "", + relativeMountPath = "", + mountOptions = "", + }, + } + }, + targetNodeCommunicationMode = "default", + upgradePolicy = new + { + mode = "automatic", + automaticOSUpgradePolicy = new + { + disableAutomaticRollback = true, + enableAutomaticOSUpgrade = true, + useRollingUpgradePolicy = true, + osRollingUpgradeDeferral = true, + }, + rollingUpgradePolicy = new + { + enableCrossZoneUpgrade = true, + maxBatchInstancePercent = 1234, + maxUnhealthyInstancePercent = 1234, + maxUnhealthyUpgradedInstancePercent = 1234, + pauseTimeBetweenBatches = "PT1H23M45S", + prioritizeUnhealthyInstances = true, + rollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, + }, + onAllTasksComplete = "noaction", + onTaskFailure = "noaction", + networkConfiguration = new + { + 
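+                        // The virtual network subnet must have enough free IP addresses for the pool's target dedicated and low-priority node counts.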
subnetId = "", + }, + metadata = new object[] + { + null + }, +}); +Response response = await client.CreateJobAsync(content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call CreateJob. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + id = "", + poolInfo = new object(), +}); +Response response = client.CreateJob(content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call CreateJob with all parameters and request content. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + id = "", + displayName = "", + usesTaskDependencies = true, + priority = 1234, + allowTaskPreemption = true, + maxParallelTasks = 1234, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + jobManagerTask = new + { + id = "", + displayName = "", + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + registry = new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + }, + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { + new + { + autoStorageContainerName = "", + storageContainerUrl = "", + httpUrl = "", + blobPrefix = "", + filePath = "", + fileMode = "", + } + }, + outputFiles = new object[] + { + new + { + filePattern = "", + destination = new + { + container = new + { + path = "", + containerUrl = "", + uploadHeaders = new object[] + { + new + { + name = "", + value = "", + } + }, + }, + }, + uploadOptions = new + { + uploadCondition = "tasksuccess", + }, + } + }, + environmentSettings = new object[] + { + new + { + name = "", + value = "", + } + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + requiredSlots = 1234, + killJobOnCompletion = true, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + runExclusive = true, + applicationPackageReferences = new object[] + { + new + { + applicationId = "", + version = "", + } + }, + authenticationTokenSettings = new + { + access = new object[] + { + "job" + }, + }, + allowLowPriorityNode = true, + }, + jobPreparationTask = new + { + id = "", + commandLine = "", + resourceFiles = new object[] + { + null + }, + environmentSettings = new object[] + { + null + }, + waitForSuccess = true, + rerunOnNodeRebootAfterSuccess = true, + }, + jobReleaseTask = new + { + id = "", + commandLine = "", + resourceFiles = new object[] + { + null + }, + environmentSettings = new object[] + { + null + }, + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + }, + commonEnvironmentSettings = new object[] + { + null + }, + poolInfo = new + { + poolId = "", + autoPoolSpecification = new + { + autoPoolIdPrefix = "", + poolLifetimeOption = "jobschedule", + keepAlive = true, + pool = new + { + displayName = "", + vmSize = "", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "", + offer = "", + sku = "", + version = "", + virtualMachineImageId = "", + }, + nodeAgentSKUId = "", + windowsConfiguration = new + { + 
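+                        // If omitted, enableAutomaticUpdates defaults to true.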
enableAutomaticUpdates = true, + }, + dataDisks = new object[] + { + new + { + lun = 1234, + caching = "none", + diskSizeGB = 1234, + storageAccountType = "standard_lrs", + } + }, + licenseType = "", + containerConfiguration = new + { + type = "dockerCompatible", + containerImageNames = new object[] + { + "" + }, + containerRegistries = new object[] + { + null + }, + }, + diskEncryptionConfiguration = new + { + targets = new object[] + { + "osdisk" + }, + }, + nodePlacementConfiguration = new + { + policy = "regional", + }, + extensions = new object[] + { + new + { + name = "", + publisher = "", + type = "", + typeHandlerVersion = "", + autoUpgradeMinorVersion = true, + enableAutomaticUpgrade = true, + settings = new + { + key = "", + }, + protectedSettings = new + { + key = "", + }, + provisionAfterExtensions = new object[] + { + "" + }, + } + }, + osDisk = new + { + ephemeralOSDiskSettings = new + { + placement = "cachedisk", + }, + caching = "none", + diskSizeGB = 1234, + managedDisk = new + { + storageAccountType = "standard_lrs", + }, + writeAcceleratorEnabled = true, + }, + securityProfile = new + { + encryptionAtHost = true, + securityType = "trustedLaunch", + uefiSettings = new + { + secureBootEnabled = true, + vTpmEnabled = true, + }, + }, + serviceArtifactReference = new + { + id = "", + }, + }, + taskSlotsPerNode = 1234, + taskSchedulingPolicy = new + { + nodeFillType = "spread", + }, + resizeTimeout = "PT1H23M45S", + resourceTags = "", + targetDedicatedNodes = 1234, + targetLowPriorityNodes = 1234, + enableAutoScale = true, + autoScaleFormula = "", + autoScaleEvaluationInterval = "PT1H23M45S", + enableInterNodeCommunication = true, + networkConfiguration = new + { + subnetId = "", + dynamicVNetAssignmentScope = "none", + endpointConfiguration = new + { + inboundNATPools = new object[] + { + new + { + name = "", + protocol = "tcp", + backendPort = 1234, + frontendPortRangeStart = 1234, + frontendPortRangeEnd = 1234, + networkSecurityGroupRules = new object[] + { + new + { + priority = 1234, + access = "allow", + sourceAddressPrefix = "", + sourcePortRanges = new object[] + { + "" + }, + } + }, + } + }, + }, + publicIPAddressConfiguration = new + { + provision = "batchmanaged", + ipAddressIds = new object[] + { + "" + }, + }, + enableAcceleratedNetworking = true, + }, + startTask = new + { + commandLine = "", + resourceFiles = new object[] + { + null + }, + environmentSettings = new object[] + { + null + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { + null + }, + userAccounts = new object[] + { + new + { + name = "", + password = "", + elevationLevel = "nonadmin", + linuxUserConfiguration = new + { + uid = 1234, + gid = 1234, + sshPrivateKey = "", + }, + windowsUserConfiguration = new + { + loginMode = "batch", + }, + } + }, + metadata = new object[] + { + new + { + name = "", + value = "", + } + }, + mountConfiguration = new object[] + { + new + { + azureBlobFileSystemConfiguration = new + { + accountName = "", + containerName = "", + accountKey = "", + sasKey = "", + blobfuseOptions = "", + relativeMountPath = "", + }, + nfsMountConfiguration = new + { + source = "", + relativeMountPath = "", + mountOptions = "", + }, + cifsMountConfiguration = new + { + username = "", + source = "", + relativeMountPath = "", + mountOptions = "", + password = "", + }, + azureFileShareConfiguration = new + { + accountName = "", + azureFileUrl = "", + accountKey = "", + relativeMountPath = "", + mountOptions = "", + }, + } + }, + 
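+                    // A real mount entry sets exactly one file-system type (blobfuse, NFS, CIFS, or Azure Files); all four appear above only to show the wire shape.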
targetNodeCommunicationMode = "default", + upgradePolicy = new + { + mode = "automatic", + automaticOSUpgradePolicy = new + { + disableAutomaticRollback = true, + enableAutomaticOSUpgrade = true, + useRollingUpgradePolicy = true, + osRollingUpgradeDeferral = true, + }, + rollingUpgradePolicy = new + { + enableCrossZoneUpgrade = true, + maxBatchInstancePercent = 1234, + maxUnhealthyInstancePercent = 1234, + maxUnhealthyUpgradedInstancePercent = 1234, + pauseTimeBetweenBatches = "PT1H23M45S", + prioritizeUnhealthyInstances = true, + rollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, + }, + onAllTasksComplete = "noaction", + onTaskFailure = "noaction", + networkConfiguration = new + { + subnetId = "", + }, + metadata = new object[] + { + null + }, +}); +Response response = client.CreateJob(content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call GetJobTaskCountsAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetJobTaskCountsAsync(""); +]]> +This sample shows how to call GetJobTaskCountsAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetJobTaskCountsAsync("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); +]]> + + + +This sample shows how to call GetJobTaskCounts. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.GetJobTaskCounts(""); +]]> +This sample shows how to call GetJobTaskCounts with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.GetJobTaskCounts("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); +]]> + + + +This sample shows how to call GetJobTaskCountsAsync and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetJobTaskCountsAsync("", null, null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("taskCounts").GetProperty("active").ToString()); +Console.WriteLine(result.GetProperty("taskCounts").GetProperty("running").ToString()); +Console.WriteLine(result.GetProperty("taskCounts").GetProperty("completed").ToString()); +Console.WriteLine(result.GetProperty("taskCounts").GetProperty("succeeded").ToString()); +Console.WriteLine(result.GetProperty("taskCounts").GetProperty("failed").ToString()); +Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("active").ToString()); +Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("running").ToString()); +Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("completed").ToString()); +Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("succeeded").ToString()); +Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("failed").ToString()); +]]> +This sample shows how to call GetJobTaskCountsAsync with all parameters and parse the result. 
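+The protocol method returns the counts as raw JSON, so the snippet walks the taskCounts and taskSlotCounts objects with JsonDocument instead of a typed model.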
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetJobTaskCountsAsync("", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("taskCounts").GetProperty("active").ToString()); +Console.WriteLine(result.GetProperty("taskCounts").GetProperty("running").ToString()); +Console.WriteLine(result.GetProperty("taskCounts").GetProperty("completed").ToString()); +Console.WriteLine(result.GetProperty("taskCounts").GetProperty("succeeded").ToString()); +Console.WriteLine(result.GetProperty("taskCounts").GetProperty("failed").ToString()); +Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("active").ToString()); +Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("running").ToString()); +Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("completed").ToString()); +Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("succeeded").ToString()); +Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("failed").ToString()); +]]> + + + +This sample shows how to call GetJobTaskCounts and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.GetJobTaskCounts("", null, null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("taskCounts").GetProperty("active").ToString()); +Console.WriteLine(result.GetProperty("taskCounts").GetProperty("running").ToString()); +Console.WriteLine(result.GetProperty("taskCounts").GetProperty("completed").ToString()); +Console.WriteLine(result.GetProperty("taskCounts").GetProperty("succeeded").ToString()); +Console.WriteLine(result.GetProperty("taskCounts").GetProperty("failed").ToString()); +Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("active").ToString()); +Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("running").ToString()); +Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("completed").ToString()); +Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("succeeded").ToString()); +Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("failed").ToString()); +]]> +This sample shows how to call GetJobTaskCounts with all parameters and parse the result. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.GetJobTaskCounts("", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("taskCounts").GetProperty("active").ToString()); +Console.WriteLine(result.GetProperty("taskCounts").GetProperty("running").ToString()); +Console.WriteLine(result.GetProperty("taskCounts").GetProperty("completed").ToString()); +Console.WriteLine(result.GetProperty("taskCounts").GetProperty("succeeded").ToString()); +Console.WriteLine(result.GetProperty("taskCounts").GetProperty("failed").ToString()); +Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("active").ToString()); +Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("running").ToString()); +Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("completed").ToString()); +Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("succeeded").ToString()); +Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("failed").ToString()); +]]> + + + +This sample shows how to call DeleteJobScheduleAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.DeleteJobScheduleAsync(""); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call DeleteJobScheduleAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.DeleteJobScheduleAsync("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call DeleteJobSchedule. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.DeleteJobSchedule(""); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call DeleteJobSchedule with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.DeleteJobSchedule("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call GetJobScheduleAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetJobScheduleAsync(""); +]]> +This sample shows how to call GetJobScheduleAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetJobScheduleAsync("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), select: new string[] { "" }, expand: new string[] { "" }, requestConditions: null); +]]> + + + +This sample shows how to call GetJobScheduleAsync and parse the result. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetJobScheduleAsync("", null, null, null, null, null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").ToString()); +]]> +This sample shows how to call GetJobScheduleAsync with all parameters and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetJobScheduleAsync("", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), new string[] { "" }, new string[] { "" }, null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("id").ToString()); +Console.WriteLine(result.GetProperty("displayName").ToString()); +Console.WriteLine(result.GetProperty("url").ToString()); +Console.WriteLine(result.GetProperty("eTag").ToString()); +Console.WriteLine(result.GetProperty("lastModified").ToString()); +Console.WriteLine(result.GetProperty("creationTime").ToString()); +Console.WriteLine(result.GetProperty("state").ToString()); +Console.WriteLine(result.GetProperty("stateTransitionTime").ToString()); +Console.WriteLine(result.GetProperty("previousState").ToString()); +Console.WriteLine(result.GetProperty("previousStateTransitionTime").ToString()); +Console.WriteLine(result.GetProperty("schedule").GetProperty("doNotRunUntil").ToString()); +Console.WriteLine(result.GetProperty("schedule").GetProperty("doNotRunAfter").ToString()); +Console.WriteLine(result.GetProperty("schedule").GetProperty("startWindow").ToString()); +Console.WriteLine(result.GetProperty("schedule").GetProperty("recurrenceInterval").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("priority").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("allowTaskPreemption").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("maxParallelTasks").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("displayName").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("usesTaskDependencies").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("onAllTasksComplete").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("onTaskFailure").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("networkConfiguration").GetProperty("subnetId").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("id").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("displayName").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("commandLine").ToString()); 
+Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("filePattern").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("path").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("containerUrl").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("identityReference").GetProperty("resourceId").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("uploadHeaders")[0].GetProperty("name").ToString()); 
+Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("uploadHeaders")[0].GetProperty("value").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("uploadOptions").GetProperty("uploadCondition").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("constraints").GetProperty("retentionTime").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("requiredSlots").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("killJobOnCompletion").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("userIdentity").GetProperty("username").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("runExclusive").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("applicationPackageReferences")[0].GetProperty("applicationId").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("applicationPackageReferences")[0].GetProperty("version").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("authenticationTokenSettings").GetProperty("access")[0].ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("allowLowPriorityNode").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("id").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("commandLine").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); 
+Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("constraints").GetProperty("retentionTime").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("waitForSuccess").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("userIdentity").GetProperty("username").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); 
+Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("rerunOnNodeRebootAfterSuccess").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("id").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("commandLine").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("maxWallClockTime").ToString()); 
+Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("retentionTime").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("userIdentity").GetProperty("username").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("commonEnvironmentSettings")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("commonEnvironmentSettings")[0].GetProperty("value").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("poolId").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("autoPoolIdPrefix").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("poolLifetimeOption").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("keepAlive").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("displayName").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("vmSize").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("publisher").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("offer").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("sku").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("version").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("virtualMachineImageId").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("exactVersion").ToString()); 
+Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("nodeAgentSKUId").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("windowsConfiguration").GetProperty("enableAutomaticUpdates").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("lun").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("caching").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("diskSizeGB").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("storageAccountType").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("licenseType").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("type").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerImageNames")[0].ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("username").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("password").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("registryServer").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); 
+Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("diskEncryptionConfiguration").GetProperty("targets")[0].ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("nodePlacementConfiguration").GetProperty("policy").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("publisher").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("type").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("typeHandlerVersion").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("autoUpgradeMinorVersion").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("enableAutomaticUpgrade").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("settings").GetProperty("").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("protectedSettings").GetProperty("").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("provisionAfterExtensions")[0].ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("ephemeralOSDiskSettings").GetProperty("placement").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("caching").ToString()); 
+Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("diskSizeGB").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("managedDisk").GetProperty("storageAccountType").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("writeAcceleratorEnabled").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("encryptionAtHost").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("securityType").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("uefiSettings").GetProperty("secureBootEnabled").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("uefiSettings").GetProperty("vTpmEnabled").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("serviceArtifactReference").GetProperty("id").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("taskSlotsPerNode").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("taskSchedulingPolicy").GetProperty("nodeFillType").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("resizeTimeout").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("resourceTags").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("targetDedicatedNodes").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("targetLowPriorityNodes").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("enableAutoScale").ToString()); 
+Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("autoScaleFormula").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("autoScaleEvaluationInterval").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("enableInterNodeCommunication").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("subnetId").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("dynamicVNetAssignmentScope").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("protocol").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("backendPort").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("frontendPortRangeStart").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("frontendPortRangeEnd").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("priority").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("access").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("sourceAddressPrefix").ToString()); 
+Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("sourcePortRanges")[0].ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("publicIPAddressConfiguration").GetProperty("provision").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("publicIPAddressConfiguration").GetProperty("ipAddressIds")[0].ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("enableAcceleratedNetworking").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("commandLine").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); 
+Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("userIdentity").GetProperty("username").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("maxTaskRetryCount").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("waitForSuccess").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("applicationPackageReferences")[0].GetProperty("applicationId").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("applicationPackageReferences")[0].GetProperty("version").ToString()); 
+Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("password").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("elevationLevel").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("uid").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("gid").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("sshPrivateKey").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("windowsUserConfiguration").GetProperty("loginMode").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("metadata")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("metadata")[0].GetProperty("value").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("accountName").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("containerName").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("accountKey").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("sasKey").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("blobfuseOptions").ToString()); 
+Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("relativeMountPath").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("identityReference").GetProperty("resourceId").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("source").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("relativeMountPath").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("mountOptions").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("username").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("source").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("relativeMountPath").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("mountOptions").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("password").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("accountName").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("azureFileUrl").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("accountKey").ToString()); 
+Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("relativeMountPath").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("mountOptions").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("targetNodeCommunicationMode").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("mode").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("disableAutomaticRollback").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("enableAutomaticOSUpgrade").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("useRollingUpgradePolicy").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("osRollingUpgradeDeferral").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("enableCrossZoneUpgrade").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxBatchInstancePercent").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxUnhealthyInstancePercent").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxUnhealthyUpgradedInstancePercent").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("pauseTimeBetweenBatches").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("prioritizeUnhealthyInstances").ToString()); 
+Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("rollbackFailedInstancesOnPolicyBreach").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("metadata")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("metadata")[0].GetProperty("value").ToString()); +Console.WriteLine(result.GetProperty("executionInfo").GetProperty("nextRunTime").ToString()); +Console.WriteLine(result.GetProperty("executionInfo").GetProperty("recentJob").GetProperty("id").ToString()); +Console.WriteLine(result.GetProperty("executionInfo").GetProperty("recentJob").GetProperty("url").ToString()); +Console.WriteLine(result.GetProperty("executionInfo").GetProperty("endTime").ToString()); +Console.WriteLine(result.GetProperty("metadata")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("metadata")[0].GetProperty("value").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("url").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("startTime").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("lastUpdateTime").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("userCPUTime").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("kernelCPUTime").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("wallClockTime").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("readIOps").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("writeIOps").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("readIOGiB").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("writeIOGiB").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("numSucceededTasks").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("numFailedTasks").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("numTaskRetries").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("waitTime").ToString()); +]]> + + + +This sample shows how to call UpdateJobScheduleAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new object()); +Response response = await client.UpdateJobScheduleAsync("", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call UpdateJobScheduleAsync with all parameters and request content. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + schedule = new + { + doNotRunUntil = "2022-05-10T18:57:31.2311892Z", + doNotRunAfter = "2022-05-10T18:57:31.2311892Z", + startWindow = "PT1H23M45S", + recurrenceInterval = "PT1H23M45S", + }, + jobSpecification = new + { + priority = 1234, + allowTaskPreemption = true, + maxParallelTasks = 1234, + displayName = "", + usesTaskDependencies = true, + onAllTasksComplete = "noaction", + onTaskFailure = "noaction", + networkConfiguration = new + { + subnetId = "", + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + jobManagerTask = new + { + id = "", + displayName = "", + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + registry = new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + }, + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { + new + { + autoStorageContainerName = "", + storageContainerUrl = "", + httpUrl = "", + blobPrefix = "", + filePath = "", + fileMode = "", + } + }, + outputFiles = new object[] + { + new + { + filePattern = "", + destination = new + { + container = new + { + path = "", + containerUrl = "", + uploadHeaders = new object[] + { + new + { + name = "", + value = "", + } + }, + }, + }, + uploadOptions = new + { + uploadCondition = "tasksuccess", + }, + } + }, + environmentSettings = new object[] + { + new + { + name = "", + value = "", + } + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + requiredSlots = 1234, + killJobOnCompletion = true, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + runExclusive = true, + applicationPackageReferences = new object[] + { + new + { + applicationId = "", + version = "", + } + }, + authenticationTokenSettings = new + { + access = new object[] + { + "job" + }, + }, + allowLowPriorityNode = true, + }, + jobPreparationTask = new + { + id = "", + commandLine = "", + resourceFiles = new object[] + { + null + }, + environmentSettings = new object[] + { + null + }, + waitForSuccess = true, + rerunOnNodeRebootAfterSuccess = true, + }, + jobReleaseTask = new + { + id = "", + commandLine = "", + resourceFiles = new object[] + { + null + }, + environmentSettings = new object[] + { + null + }, + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + }, + commonEnvironmentSettings = new object[] + { + null + }, + poolInfo = new + { + poolId = "", + autoPoolSpecification = new + { + autoPoolIdPrefix = "", + poolLifetimeOption = "jobschedule", + keepAlive = true, + pool = new + { + displayName = "", + vmSize = "", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "", + offer = "", + sku = "", + version = "", + virtualMachineImageId = "", + }, + nodeAgentSKUId = "", + windowsConfiguration = new + { + enableAutomaticUpdates = true, + }, + dataDisks = new object[] + { + new + { + lun = 1234, + caching = "none", + diskSizeGB = 1234, + storageAccountType = "standard_lrs", + } + }, + licenseType = "", + containerConfiguration = new + { + type = "dockerCompatible", + containerImageNames = new object[] + { + "" + }, + containerRegistries = new object[] + { + null + }, + }, + 
diskEncryptionConfiguration = new + { + targets = new object[] + { + "osdisk" + }, + }, + nodePlacementConfiguration = new + { + policy = "regional", + }, + extensions = new object[] + { + new + { + name = "", + publisher = "", + type = "", + typeHandlerVersion = "", + autoUpgradeMinorVersion = true, + enableAutomaticUpgrade = true, + settings = new + { + key = "", + }, + protectedSettings = new + { + key = "", + }, + provisionAfterExtensions = new object[] + { + "" + }, + } + }, + osDisk = new + { + ephemeralOSDiskSettings = new + { + placement = "cachedisk", + }, + caching = "none", + diskSizeGB = 1234, + managedDisk = new + { + storageAccountType = "standard_lrs", + }, + writeAcceleratorEnabled = true, + }, + securityProfile = new + { + encryptionAtHost = true, + securityType = "trustedLaunch", + uefiSettings = new + { + secureBootEnabled = true, + vTpmEnabled = true, + }, + }, + serviceArtifactReference = new + { + id = "", + }, + }, + taskSlotsPerNode = 1234, + taskSchedulingPolicy = new + { + nodeFillType = "spread", + }, + resizeTimeout = "PT1H23M45S", + resourceTags = "", + targetDedicatedNodes = 1234, + targetLowPriorityNodes = 1234, + enableAutoScale = true, + autoScaleFormula = "", + autoScaleEvaluationInterval = "PT1H23M45S", + enableInterNodeCommunication = true, + networkConfiguration = new + { + subnetId = "", + dynamicVNetAssignmentScope = "none", + endpointConfiguration = new + { + inboundNATPools = new object[] + { + new + { + name = "", + protocol = "tcp", + backendPort = 1234, + frontendPortRangeStart = 1234, + frontendPortRangeEnd = 1234, + networkSecurityGroupRules = new object[] + { + new + { + priority = 1234, + access = "allow", + sourceAddressPrefix = "", + sourcePortRanges = new object[] + { + "" + }, + } + }, + } + }, + }, + publicIPAddressConfiguration = new + { + provision = "batchmanaged", + ipAddressIds = new object[] + { + "" + }, + }, + enableAcceleratedNetworking = true, + }, + startTask = new + { + commandLine = "", + resourceFiles = new object[] + { + null + }, + environmentSettings = new object[] + { + null + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { + null + }, + userAccounts = new object[] + { + new + { + name = "", + password = "", + elevationLevel = "nonadmin", + linuxUserConfiguration = new + { + uid = 1234, + gid = 1234, + sshPrivateKey = "", + }, + windowsUserConfiguration = new + { + loginMode = "batch", + }, + } + }, + metadata = new object[] + { + new + { + name = "", + value = "", + } + }, + mountConfiguration = new object[] + { + new + { + azureBlobFileSystemConfiguration = new + { + accountName = "", + containerName = "", + accountKey = "", + sasKey = "", + blobfuseOptions = "", + relativeMountPath = "", + }, + nfsMountConfiguration = new + { + source = "", + relativeMountPath = "", + mountOptions = "", + }, + cifsMountConfiguration = new + { + username = "", + source = "", + relativeMountPath = "", + mountOptions = "", + password = "", + }, + azureFileShareConfiguration = new + { + accountName = "", + azureFileUrl = "", + accountKey = "", + relativeMountPath = "", + mountOptions = "", + }, + } + }, + targetNodeCommunicationMode = "default", + upgradePolicy = new + { + mode = "automatic", + automaticOSUpgradePolicy = new + { + disableAutomaticRollback = true, + enableAutomaticOSUpgrade = true, + useRollingUpgradePolicy = true, + osRollingUpgradeDeferral = true, + }, + rollingUpgradePolicy = new + { + enableCrossZoneUpgrade = true, + maxBatchInstancePercent = 1234, + 
maxUnhealthyInstancePercent = 1234, + maxUnhealthyUpgradedInstancePercent = 1234, + pauseTimeBetweenBatches = "PT1H23M45S", + prioritizeUnhealthyInstances = true, + rollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, + }, + metadata = new object[] + { + null + }, + }, + metadata = new object[] + { + null + }, +}); +Response response = await client.UpdateJobScheduleAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call UpdateJobSchedule. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new object()); +Response response = client.UpdateJobSchedule("", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call UpdateJobSchedule with all parameters and request content. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + schedule = new + { + doNotRunUntil = "2022-05-10T18:57:31.2311892Z", + doNotRunAfter = "2022-05-10T18:57:31.2311892Z", + startWindow = "PT1H23M45S", + recurrenceInterval = "PT1H23M45S", + }, + jobSpecification = new + { + priority = 1234, + allowTaskPreemption = true, + maxParallelTasks = 1234, + displayName = "", + usesTaskDependencies = true, + onAllTasksComplete = "noaction", + onTaskFailure = "noaction", + networkConfiguration = new + { + subnetId = "", + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + jobManagerTask = new + { + id = "", + displayName = "", + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + registry = new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + }, + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { + new + { + autoStorageContainerName = "", + storageContainerUrl = "", + httpUrl = "", + blobPrefix = "", + filePath = "", + fileMode = "", + } + }, + outputFiles = new object[] + { + new + { + filePattern = "", + destination = new + { + container = new + { + path = "", + containerUrl = "", + uploadHeaders = new object[] + { + new + { + name = "", + value = "", + } + }, + }, + }, + uploadOptions = new + { + uploadCondition = "tasksuccess", + }, + } + }, + environmentSettings = new object[] + { + new + { + name = "", + value = "", + } + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + requiredSlots = 1234, + killJobOnCompletion = true, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + runExclusive = true, + applicationPackageReferences = new object[] + { + new + { + applicationId = "", + version = "", + } + }, + authenticationTokenSettings = new + { + access = new object[] + { + "job" + }, + }, + allowLowPriorityNode = true, + }, + jobPreparationTask = new + { + id = "", + commandLine = "", + resourceFiles = new object[] + { + null + }, + environmentSettings = new object[] + { + null + }, + waitForSuccess = true, + rerunOnNodeRebootAfterSuccess = true, + }, + jobReleaseTask = new + { + id = "", + commandLine = "", + resourceFiles = new object[] + { + 
null + }, + environmentSettings = new object[] + { + null + }, + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + }, + commonEnvironmentSettings = new object[] + { + null + }, + poolInfo = new + { + poolId = "", + autoPoolSpecification = new + { + autoPoolIdPrefix = "", + poolLifetimeOption = "jobschedule", + keepAlive = true, + pool = new + { + displayName = "", + vmSize = "", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "", + offer = "", + sku = "", + version = "", + virtualMachineImageId = "", + }, + nodeAgentSKUId = "", + windowsConfiguration = new + { + enableAutomaticUpdates = true, + }, + dataDisks = new object[] + { + new + { + lun = 1234, + caching = "none", + diskSizeGB = 1234, + storageAccountType = "standard_lrs", + } + }, + licenseType = "", + containerConfiguration = new + { + type = "dockerCompatible", + containerImageNames = new object[] + { + "" + }, + containerRegistries = new object[] + { + null + }, + }, + diskEncryptionConfiguration = new + { + targets = new object[] + { + "osdisk" + }, + }, + nodePlacementConfiguration = new + { + policy = "regional", + }, + extensions = new object[] + { + new + { + name = "", + publisher = "", + type = "", + typeHandlerVersion = "", + autoUpgradeMinorVersion = true, + enableAutomaticUpgrade = true, + settings = new + { + key = "", + }, + protectedSettings = new + { + key = "", + }, + provisionAfterExtensions = new object[] + { + "" + }, + } + }, + osDisk = new + { + ephemeralOSDiskSettings = new + { + placement = "cachedisk", + }, + caching = "none", + diskSizeGB = 1234, + managedDisk = new + { + storageAccountType = "standard_lrs", + }, + writeAcceleratorEnabled = true, + }, + securityProfile = new + { + encryptionAtHost = true, + securityType = "trustedLaunch", + uefiSettings = new + { + secureBootEnabled = true, + vTpmEnabled = true, + }, + }, + serviceArtifactReference = new + { + id = "", + }, + }, + taskSlotsPerNode = 1234, + taskSchedulingPolicy = new + { + nodeFillType = "spread", + }, + resizeTimeout = "PT1H23M45S", + resourceTags = "", + targetDedicatedNodes = 1234, + targetLowPriorityNodes = 1234, + enableAutoScale = true, + autoScaleFormula = "", + autoScaleEvaluationInterval = "PT1H23M45S", + enableInterNodeCommunication = true, + networkConfiguration = new + { + subnetId = "", + dynamicVNetAssignmentScope = "none", + endpointConfiguration = new + { + inboundNATPools = new object[] + { + new + { + name = "", + protocol = "tcp", + backendPort = 1234, + frontendPortRangeStart = 1234, + frontendPortRangeEnd = 1234, + networkSecurityGroupRules = new object[] + { + new + { + priority = 1234, + access = "allow", + sourceAddressPrefix = "", + sourcePortRanges = new object[] + { + "" + }, + } + }, + } + }, + }, + publicIPAddressConfiguration = new + { + provision = "batchmanaged", + ipAddressIds = new object[] + { + "" + }, + }, + enableAcceleratedNetworking = true, + }, + startTask = new + { + commandLine = "", + resourceFiles = new object[] + { + null + }, + environmentSettings = new object[] + { + null + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { + null + }, + userAccounts = new object[] + { + new + { + name = "", + password = "", + elevationLevel = "nonadmin", + linuxUserConfiguration = new + { + uid = 1234, + gid = 1234, + sshPrivateKey = "", + }, + windowsUserConfiguration = new + { + loginMode = "batch", + }, + } + }, + metadata = new object[] + { + new + { + name = "", + value = "", + } + }, + 
mountConfiguration = new object[] + { + new + { + azureBlobFileSystemConfiguration = new + { + accountName = "", + containerName = "", + accountKey = "", + sasKey = "", + blobfuseOptions = "", + relativeMountPath = "", + }, + nfsMountConfiguration = new + { + source = "", + relativeMountPath = "", + mountOptions = "", + }, + cifsMountConfiguration = new + { + username = "", + source = "", + relativeMountPath = "", + mountOptions = "", + password = "", + }, + azureFileShareConfiguration = new + { + accountName = "", + azureFileUrl = "", + accountKey = "", + relativeMountPath = "", + mountOptions = "", + }, + } + }, + targetNodeCommunicationMode = "default", + upgradePolicy = new + { + mode = "automatic", + automaticOSUpgradePolicy = new + { + disableAutomaticRollback = true, + enableAutomaticOSUpgrade = true, + useRollingUpgradePolicy = true, + osRollingUpgradeDeferral = true, + }, + rollingUpgradePolicy = new + { + enableCrossZoneUpgrade = true, + maxBatchInstancePercent = 1234, + maxUnhealthyInstancePercent = 1234, + maxUnhealthyUpgradedInstancePercent = 1234, + pauseTimeBetweenBatches = "PT1H23M45S", + prioritizeUnhealthyInstances = true, + rollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, + }, + metadata = new object[] + { + null + }, + }, + metadata = new object[] + { + null + }, +}); +Response response = client.UpdateJobSchedule("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call ReplaceJobScheduleAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchJobSchedule jobSchedule = new BatchJobSchedule(new BatchJobSpecification(new BatchPoolInfo())); +Response response = await client.ReplaceJobScheduleAsync("", jobSchedule); +]]> +This sample shows how to call ReplaceJobScheduleAsync with all parameters. 
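+The ISO 8601 duration strings used throughout ("PT1H23M45S") are parsed with XmlConvert.ToTimeSpan from System.Xml, as in this illustrative fragment (the variable name is hypothetical):
+
+TimeSpan interval = XmlConvert.ToTimeSpan("PT1H23M45S"); // 1 hour, 23 minutes, 45 seconds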
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchJobSchedule jobSchedule = new BatchJobSchedule(new BatchJobSpecification(new BatchPoolInfo +{ + PoolId = "", + AutoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.JobSchedule) + { + AutoPoolIdPrefix = "", + KeepAlive = true, + Pool = new BatchPoolSpecification("") + { + DisplayName = "", + VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + { + Publisher = "", + Offer = "", + Sku = "", + Version = "", + VirtualMachineImageId = "", + }, "") + { + WindowsConfiguration = new WindowsConfiguration + { + EnableAutomaticUpdates = true, + }, + DataDisks = {new DataDisk(1234, 1234) + { + Caching = CachingType.None, + StorageAccountType = StorageAccountType.StandardLRS, + }}, + LicenseType = "", + ContainerConfiguration = new ContainerConfiguration(ContainerType.DockerCompatible) + { + ContainerImageNames = { "" }, + ContainerRegistries = { default }, + }, + DiskEncryptionConfiguration = new DiskEncryptionConfiguration + { + Targets = { DiskEncryptionTarget.OsDisk }, + }, + NodePlacementConfiguration = new BatchNodePlacementConfiguration + { + Policy = BatchNodePlacementPolicyType.Regional, + }, + Extensions = {new VMExtension("", "", "") + { + TypeHandlerVersion = "", + AutoUpgradeMinorVersion = true, + EnableAutomaticUpgrade = true, + Settings = + { + ["key"] = "" + }, + ProtectedSettings = + { + ["key"] = "" + }, + ProvisionAfterExtensions = {""}, + }}, + OsDisk = new OSDisk + { + EphemeralOSDiskSettings = new DiffDiskSettings + { + Placement = DiffDiskPlacement.CacheDisk, + }, + Caching = CachingType.None, + DiskSizeGB = 1234, + ManagedDisk = new ManagedDisk(StorageAccountType.StandardLRS), + WriteAcceleratorEnabled = true, + }, + SecurityProfile = new SecurityProfile(true, SecurityTypes.TrustedLaunch, new UefiSettings + { + SecureBootEnabled = true, + VTpmEnabled = true, + }), + ServiceArtifactReference = new ServiceArtifactReference(""), + }, + TaskSlotsPerNode = 1234, + TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), + ResizeTimeout = XmlConvert.ToTimeSpan("PT1H23M45S"), + ResourceTags = "", + TargetDedicatedNodes = 1234, + TargetLowPriorityNodes = 1234, + EnableAutoScale = true, + AutoScaleFormula = "", + AutoScaleEvaluationInterval = XmlConvert.ToTimeSpan("PT1H23M45S"), + EnableInterNodeCommunication = true, + NetworkConfiguration = new NetworkConfiguration + { + SubnetId = "", + DynamicVNetAssignmentScope = DynamicVNetAssignmentScope.None, + EndpointConfiguration = new BatchPoolEndpointConfiguration(new InboundNatPool[] + { + new InboundNatPool("", InboundEndpointProtocol.Tcp, 1234, 1234, 1234) + { + NetworkSecurityGroupRules = {new NetworkSecurityGroupRule(1234, NetworkSecurityGroupRuleAccess.Allow, "") + { + SourcePortRanges = {""}, + }}, + } + }), + PublicIpAddressConfiguration = new PublicIpAddressConfiguration + { + IpAddressProvisioningType = IpAddressProvisioningType.BatchManaged, + IpAddressIds = { "" }, + }, + EnableAcceleratedNetworking = true, + }, + StartTask = new BatchStartTask("") + { + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + UserIdentity = default, + MaxTaskRetryCount = 1234, + WaitForSuccess = true, + }, + ApplicationPackageReferences = { default }, + UserAccounts = {new UserAccount("", "") + { + ElevationLevel = ElevationLevel.NonAdmin, + LinuxUserConfiguration = new LinuxUserConfiguration + { 
+ Uid = 1234, + Gid = 1234, + SshPrivateKey = "", + }, + WindowsUserConfiguration = new WindowsUserConfiguration + { + LoginMode = LoginMode.Batch, + }, + }}, + Metadata = { new MetadataItem("", "") }, + MountConfiguration = {new MountConfiguration + { + AzureBlobFileSystemConfiguration = new AzureBlobFileSystemConfiguration("", "", "") + { + AccountKey = "", + SasKey = "", + BlobfuseOptions = "", + IdentityReference = default, + }, + NfsMountConfiguration = new NfsMountConfiguration("", "") + { + MountOptions = "", + }, + CifsMountConfiguration = new CifsMountConfiguration("", "", "", "") + { + MountOptions = "", + }, + AzureFileShareConfiguration = new AzureFileShareConfiguration("", "", "", "") + { + MountOptions = "", + }, + }}, + TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, + UpgradePolicy = new UpgradePolicy(UpgradeMode.Automatic) + { + AutomaticOsUpgradePolicy = new AutomaticOsUpgradePolicy + { + DisableAutomaticRollback = true, + EnableAutomaticOsUpgrade = true, + UseRollingUpgradePolicy = true, + OsRollingUpgradeDeferral = true, + }, + RollingUpgradePolicy = new RollingUpgradePolicy + { + EnableCrossZoneUpgrade = true, + MaxBatchInstancePercent = 1234, + MaxUnhealthyInstancePercent = 1234, + MaxUnhealthyUpgradedInstancePercent = 1234, + PauseTimeBetweenBatches = XmlConvert.ToTimeSpan("PT1H23M45S"), + PrioritizeUnhealthyInstances = true, + RollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, +}) +{ + Priority = 1234, + AllowTaskPreemption = true, + MaxParallelTasks = 1234, + DisplayName = "", + UsesTaskDependencies = true, + OnAllTasksComplete = OnAllBatchTasksComplete.NoAction, + OnTaskFailure = OnBatchTaskFailure.NoAction, + NetworkConfiguration = new BatchJobNetworkConfiguration(""), + Constraints = new BatchJobConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + JobManagerTask = new BatchJobManagerTask("", "") + { + DisplayName = "", + ContainerSettings = new BatchTaskContainerSettings("") + { + ContainerRunOptions = "", + Registry = new ContainerRegistryReference + { + Username = "", + Password = "", + RegistryServer = "", + IdentityReference = new BatchNodeIdentityReference + { + ResourceId = "", + }, + }, + WorkingDirectory = ContainerWorkingDirectory.TaskWorkingDirectory, + }, + ResourceFiles = {new ResourceFile + { + AutoStorageContainerName = "", + StorageContainerUrl = "", + HttpUrl = "", + BlobPrefix = "", + FilePath = "", + FileMode = "", + IdentityReference = default, + }}, + OutputFiles = {new OutputFile("", new OutputFileDestination + { + Container = new OutputFileBlobContainerDestination("") + { + Path = "", + IdentityReference = default, + UploadHeaders = {new HttpHeader("") + { + Value = "", + }}, + }, + }, new OutputFileUploadConfig(OutputFileUploadCondition.TaskSuccess))}, + EnvironmentSettings = {new EnvironmentSetting("") + { + Value = "", + }}, + Constraints = new BatchTaskConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + RequiredSlots = 1234, + KillJobOnCompletion = true, + UserIdentity = new UserIdentity + { + Username = "", + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, + RunExclusive = true, + ApplicationPackageReferences = {new BatchApplicationPackageReference("") + { + Version = "", + }}, + AuthenticationTokenSettings = new AuthenticationTokenSettings + { + Access = { 
AccessScope.Job }, + }, + AllowLowPriorityNode = true, + }, + JobPreparationTask = new BatchJobPreparationTask("") + { + Id = "", + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + Constraints = default, + WaitForSuccess = true, + UserIdentity = default, + RerunOnNodeRebootAfterSuccess = true, + }, + JobReleaseTask = new BatchJobReleaseTask("") + { + Id = "", + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + UserIdentity = default, + }, + CommonEnvironmentSettings = { default }, + Metadata = { default }, +}) +{ + Schedule = new BatchJobScheduleConfiguration + { + DoNotRunUntil = DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), + DoNotRunAfter = DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), + StartWindow = XmlConvert.ToTimeSpan("PT1H23M45S"), + RecurrenceInterval = XmlConvert.ToTimeSpan("PT1H23M45S"), + }, + Metadata = { default }, +}; +Response response = await client.ReplaceJobScheduleAsync("", jobSchedule, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); +]]> + + + +This sample shows how to call ReplaceJobSchedule. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchJobSchedule jobSchedule = new BatchJobSchedule(new BatchJobSpecification(new BatchPoolInfo())); +Response response = client.ReplaceJobSchedule("", jobSchedule); +]]> +This sample shows how to call ReplaceJobSchedule with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchJobSchedule jobSchedule = new BatchJobSchedule(new BatchJobSpecification(new BatchPoolInfo +{ + PoolId = "", + AutoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.JobSchedule) + { + AutoPoolIdPrefix = "", + KeepAlive = true, + Pool = new BatchPoolSpecification("") + { + DisplayName = "", + VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + { + Publisher = "", + Offer = "", + Sku = "", + Version = "", + VirtualMachineImageId = "", + }, "") + { + WindowsConfiguration = new WindowsConfiguration + { + EnableAutomaticUpdates = true, + }, + DataDisks = {new DataDisk(1234, 1234) + { + Caching = CachingType.None, + StorageAccountType = StorageAccountType.StandardLRS, + }}, + LicenseType = "", + ContainerConfiguration = new ContainerConfiguration(ContainerType.DockerCompatible) + { + ContainerImageNames = { "" }, + ContainerRegistries = { default }, + }, + DiskEncryptionConfiguration = new DiskEncryptionConfiguration + { + Targets = { DiskEncryptionTarget.OsDisk }, + }, + NodePlacementConfiguration = new BatchNodePlacementConfiguration + { + Policy = BatchNodePlacementPolicyType.Regional, + }, + Extensions = {new VMExtension("", "", "") + { + TypeHandlerVersion = "", + AutoUpgradeMinorVersion = true, + EnableAutomaticUpgrade = true, + Settings = + { + ["key"] = "" + }, + ProtectedSettings = + { + ["key"] = "" + }, + ProvisionAfterExtensions = {""}, + }}, + OsDisk = new OSDisk + { + EphemeralOSDiskSettings = new DiffDiskSettings + { + Placement = DiffDiskPlacement.CacheDisk, + }, + Caching = CachingType.None, + DiskSizeGB = 1234, + ManagedDisk = new ManagedDisk(StorageAccountType.StandardLRS), + WriteAcceleratorEnabled = true, + }, + 
SecurityProfile = new SecurityProfile(true, SecurityTypes.TrustedLaunch, new UefiSettings + { + SecureBootEnabled = true, + VTpmEnabled = true, + }), + ServiceArtifactReference = new ServiceArtifactReference(""), + }, + TaskSlotsPerNode = 1234, + TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), + ResizeTimeout = XmlConvert.ToTimeSpan("PT1H23M45S"), + ResourceTags = "", + TargetDedicatedNodes = 1234, + TargetLowPriorityNodes = 1234, + EnableAutoScale = true, + AutoScaleFormula = "", + AutoScaleEvaluationInterval = XmlConvert.ToTimeSpan("PT1H23M45S"), + EnableInterNodeCommunication = true, + NetworkConfiguration = new NetworkConfiguration + { + SubnetId = "", + DynamicVNetAssignmentScope = DynamicVNetAssignmentScope.None, + EndpointConfiguration = new BatchPoolEndpointConfiguration(new InboundNatPool[] + { + new InboundNatPool("", InboundEndpointProtocol.Tcp, 1234, 1234, 1234) + { + NetworkSecurityGroupRules = {new NetworkSecurityGroupRule(1234, NetworkSecurityGroupRuleAccess.Allow, "") + { + SourcePortRanges = {""}, + }}, + } + }), + PublicIpAddressConfiguration = new PublicIpAddressConfiguration + { + IpAddressProvisioningType = IpAddressProvisioningType.BatchManaged, + IpAddressIds = { "" }, + }, + EnableAcceleratedNetworking = true, + }, + StartTask = new BatchStartTask("") + { + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + UserIdentity = default, + MaxTaskRetryCount = 1234, + WaitForSuccess = true, + }, + ApplicationPackageReferences = { default }, + UserAccounts = {new UserAccount("", "") + { + ElevationLevel = ElevationLevel.NonAdmin, + LinuxUserConfiguration = new LinuxUserConfiguration + { + Uid = 1234, + Gid = 1234, + SshPrivateKey = "", + }, + WindowsUserConfiguration = new WindowsUserConfiguration + { + LoginMode = LoginMode.Batch, + }, + }}, + Metadata = { new MetadataItem("", "") }, + MountConfiguration = {new MountConfiguration + { + AzureBlobFileSystemConfiguration = new AzureBlobFileSystemConfiguration("", "", "") + { + AccountKey = "", + SasKey = "", + BlobfuseOptions = "", + IdentityReference = default, + }, + NfsMountConfiguration = new NfsMountConfiguration("", "") + { + MountOptions = "", + }, + CifsMountConfiguration = new CifsMountConfiguration("", "", "", "") + { + MountOptions = "", + }, + AzureFileShareConfiguration = new AzureFileShareConfiguration("", "", "", "") + { + MountOptions = "", + }, + }}, + TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, + UpgradePolicy = new UpgradePolicy(UpgradeMode.Automatic) + { + AutomaticOsUpgradePolicy = new AutomaticOsUpgradePolicy + { + DisableAutomaticRollback = true, + EnableAutomaticOsUpgrade = true, + UseRollingUpgradePolicy = true, + OsRollingUpgradeDeferral = true, + }, + RollingUpgradePolicy = new RollingUpgradePolicy + { + EnableCrossZoneUpgrade = true, + MaxBatchInstancePercent = 1234, + MaxUnhealthyInstancePercent = 1234, + MaxUnhealthyUpgradedInstancePercent = 1234, + PauseTimeBetweenBatches = XmlConvert.ToTimeSpan("PT1H23M45S"), + PrioritizeUnhealthyInstances = true, + RollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, +}) +{ + Priority = 1234, + AllowTaskPreemption = true, + MaxParallelTasks = 1234, + DisplayName = "", + UsesTaskDependencies = true, + OnAllTasksComplete = OnAllBatchTasksComplete.NoAction, + OnTaskFailure = OnBatchTaskFailure.NoAction, + NetworkConfiguration = new BatchJobNetworkConfiguration(""), + Constraints = new BatchJobConstraints + { + MaxWallClockTime = 
XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + JobManagerTask = new BatchJobManagerTask("", "") + { + DisplayName = "", + ContainerSettings = new BatchTaskContainerSettings("") + { + ContainerRunOptions = "", + Registry = new ContainerRegistryReference + { + Username = "", + Password = "", + RegistryServer = "", + IdentityReference = new BatchNodeIdentityReference + { + ResourceId = "", + }, + }, + WorkingDirectory = ContainerWorkingDirectory.TaskWorkingDirectory, + }, + ResourceFiles = {new ResourceFile + { + AutoStorageContainerName = "", + StorageContainerUrl = "", + HttpUrl = "", + BlobPrefix = "", + FilePath = "", + FileMode = "", + IdentityReference = default, + }}, + OutputFiles = {new OutputFile("", new OutputFileDestination + { + Container = new OutputFileBlobContainerDestination("") + { + Path = "", + IdentityReference = default, + UploadHeaders = {new HttpHeader("") + { + Value = "", + }}, + }, + }, new OutputFileUploadConfig(OutputFileUploadCondition.TaskSuccess))}, + EnvironmentSettings = {new EnvironmentSetting("") + { + Value = "", + }}, + Constraints = new BatchTaskConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + RequiredSlots = 1234, + KillJobOnCompletion = true, + UserIdentity = new UserIdentity + { + Username = "", + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, + RunExclusive = true, + ApplicationPackageReferences = {new BatchApplicationPackageReference("") + { + Version = "", + }}, + AuthenticationTokenSettings = new AuthenticationTokenSettings + { + Access = { AccessScope.Job }, + }, + AllowLowPriorityNode = true, + }, + JobPreparationTask = new BatchJobPreparationTask("") + { + Id = "", + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + Constraints = default, + WaitForSuccess = true, + UserIdentity = default, + RerunOnNodeRebootAfterSuccess = true, + }, + JobReleaseTask = new BatchJobReleaseTask("") + { + Id = "", + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + UserIdentity = default, + }, + CommonEnvironmentSettings = { default }, + Metadata = { default }, +}) +{ + Schedule = new BatchJobScheduleConfiguration + { + DoNotRunUntil = DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), + DoNotRunAfter = DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), + StartWindow = XmlConvert.ToTimeSpan("PT1H23M45S"), + RecurrenceInterval = XmlConvert.ToTimeSpan("PT1H23M45S"), + }, + Metadata = { default }, +}; +Response response = client.ReplaceJobSchedule("", jobSchedule, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); +]]> + + + +This sample shows how to call ReplaceJobScheduleAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + jobSpecification = new + { + poolInfo = new object(), + }, +}); +Response response = await client.ReplaceJobScheduleAsync("", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call ReplaceJobScheduleAsync with all parameters and request content. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + schedule = new + { + doNotRunUntil = "2022-05-10T18:57:31.2311892Z", + doNotRunAfter = "2022-05-10T18:57:31.2311892Z", + startWindow = "PT1H23M45S", + recurrenceInterval = "PT1H23M45S", + }, + jobSpecification = new + { + priority = 1234, + allowTaskPreemption = true, + maxParallelTasks = 1234, + displayName = "", + usesTaskDependencies = true, + onAllTasksComplete = "noaction", + onTaskFailure = "noaction", + networkConfiguration = new + { + subnetId = "", + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + jobManagerTask = new + { + id = "", + displayName = "", + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + registry = new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + }, + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { + new + { + autoStorageContainerName = "", + storageContainerUrl = "", + httpUrl = "", + blobPrefix = "", + filePath = "", + fileMode = "", + } + }, + outputFiles = new object[] + { + new + { + filePattern = "", + destination = new + { + container = new + { + path = "", + containerUrl = "", + uploadHeaders = new object[] + { + new + { + name = "", + value = "", + } + }, + }, + }, + uploadOptions = new + { + uploadCondition = "tasksuccess", + }, + } + }, + environmentSettings = new object[] + { + new + { + name = "", + value = "", + } + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + requiredSlots = 1234, + killJobOnCompletion = true, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + runExclusive = true, + applicationPackageReferences = new object[] + { + new + { + applicationId = "", + version = "", + } + }, + authenticationTokenSettings = new + { + access = new object[] + { + "job" + }, + }, + allowLowPriorityNode = true, + }, + jobPreparationTask = new + { + id = "", + commandLine = "", + resourceFiles = new object[] + { + null + }, + environmentSettings = new object[] + { + null + }, + waitForSuccess = true, + rerunOnNodeRebootAfterSuccess = true, + }, + jobReleaseTask = new + { + id = "", + commandLine = "", + resourceFiles = new object[] + { + null + }, + environmentSettings = new object[] + { + null + }, + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + }, + commonEnvironmentSettings = new object[] + { + null + }, + poolInfo = new + { + poolId = "", + autoPoolSpecification = new + { + autoPoolIdPrefix = "", + poolLifetimeOption = "jobschedule", + keepAlive = true, + pool = new + { + displayName = "", + vmSize = "", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "", + offer = "", + sku = "", + version = "", + virtualMachineImageId = "", + }, + nodeAgentSKUId = "", + windowsConfiguration = new + { + enableAutomaticUpdates = true, + }, + dataDisks = new object[] + { + new + { + lun = 1234, + caching = "none", + diskSizeGB = 1234, + storageAccountType = "standard_lrs", + } + }, + licenseType = "", + containerConfiguration = new + { + type = "dockerCompatible", + containerImageNames = new object[] + { + "" + }, + containerRegistries = new object[] + { + null + }, + }, + 
diskEncryptionConfiguration = new + { + targets = new object[] + { + "osdisk" + }, + }, + nodePlacementConfiguration = new + { + policy = "regional", + }, + extensions = new object[] + { + new + { + name = "", + publisher = "", + type = "", + typeHandlerVersion = "", + autoUpgradeMinorVersion = true, + enableAutomaticUpgrade = true, + settings = new + { + key = "", + }, + protectedSettings = new + { + key = "", + }, + provisionAfterExtensions = new object[] + { + "" + }, + } + }, + osDisk = new + { + ephemeralOSDiskSettings = new + { + placement = "cachedisk", + }, + caching = "none", + diskSizeGB = 1234, + managedDisk = new + { + storageAccountType = "standard_lrs", + }, + writeAcceleratorEnabled = true, + }, + securityProfile = new + { + encryptionAtHost = true, + securityType = "trustedLaunch", + uefiSettings = new + { + secureBootEnabled = true, + vTpmEnabled = true, + }, + }, + serviceArtifactReference = new + { + id = "", + }, + }, + taskSlotsPerNode = 1234, + taskSchedulingPolicy = new + { + nodeFillType = "spread", + }, + resizeTimeout = "PT1H23M45S", + resourceTags = "", + targetDedicatedNodes = 1234, + targetLowPriorityNodes = 1234, + enableAutoScale = true, + autoScaleFormula = "", + autoScaleEvaluationInterval = "PT1H23M45S", + enableInterNodeCommunication = true, + networkConfiguration = new + { + subnetId = "", + dynamicVNetAssignmentScope = "none", + endpointConfiguration = new + { + inboundNATPools = new object[] + { + new + { + name = "", + protocol = "tcp", + backendPort = 1234, + frontendPortRangeStart = 1234, + frontendPortRangeEnd = 1234, + networkSecurityGroupRules = new object[] + { + new + { + priority = 1234, + access = "allow", + sourceAddressPrefix = "", + sourcePortRanges = new object[] + { + "" + }, + } + }, + } + }, + }, + publicIPAddressConfiguration = new + { + provision = "batchmanaged", + ipAddressIds = new object[] + { + "" + }, + }, + enableAcceleratedNetworking = true, + }, + startTask = new + { + commandLine = "", + resourceFiles = new object[] + { + null + }, + environmentSettings = new object[] + { + null + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { + null + }, + userAccounts = new object[] + { + new + { + name = "", + password = "", + elevationLevel = "nonadmin", + linuxUserConfiguration = new + { + uid = 1234, + gid = 1234, + sshPrivateKey = "", + }, + windowsUserConfiguration = new + { + loginMode = "batch", + }, + } + }, + metadata = new object[] + { + new + { + name = "", + value = "", + } + }, + mountConfiguration = new object[] + { + new + { + azureBlobFileSystemConfiguration = new + { + accountName = "", + containerName = "", + accountKey = "", + sasKey = "", + blobfuseOptions = "", + relativeMountPath = "", + }, + nfsMountConfiguration = new + { + source = "", + relativeMountPath = "", + mountOptions = "", + }, + cifsMountConfiguration = new + { + username = "", + source = "", + relativeMountPath = "", + mountOptions = "", + password = "", + }, + azureFileShareConfiguration = new + { + accountName = "", + azureFileUrl = "", + accountKey = "", + relativeMountPath = "", + mountOptions = "", + }, + } + }, + targetNodeCommunicationMode = "default", + upgradePolicy = new + { + mode = "automatic", + automaticOSUpgradePolicy = new + { + disableAutomaticRollback = true, + enableAutomaticOSUpgrade = true, + useRollingUpgradePolicy = true, + osRollingUpgradeDeferral = true, + }, + rollingUpgradePolicy = new + { + enableCrossZoneUpgrade = true, + maxBatchInstancePercent = 1234, + 
maxUnhealthyInstancePercent = 1234, + maxUnhealthyUpgradedInstancePercent = 1234, + pauseTimeBetweenBatches = "PT1H23M45S", + prioritizeUnhealthyInstances = true, + rollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, + }, + metadata = new object[] + { + null + }, + }, + metadata = new object[] + { + null + }, +}); +Response response = await client.ReplaceJobScheduleAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call ReplaceJobSchedule. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + jobSpecification = new + { + poolInfo = new object(), + }, +}); +Response response = client.ReplaceJobSchedule("", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call ReplaceJobSchedule with all parameters and request content. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + schedule = new + { + doNotRunUntil = "2022-05-10T18:57:31.2311892Z", + doNotRunAfter = "2022-05-10T18:57:31.2311892Z", + startWindow = "PT1H23M45S", + recurrenceInterval = "PT1H23M45S", + }, + jobSpecification = new + { + priority = 1234, + allowTaskPreemption = true, + maxParallelTasks = 1234, + displayName = "", + usesTaskDependencies = true, + onAllTasksComplete = "noaction", + onTaskFailure = "noaction", + networkConfiguration = new + { + subnetId = "", + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + jobManagerTask = new + { + id = "", + displayName = "", + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + registry = new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + }, + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { + new + { + autoStorageContainerName = "", + storageContainerUrl = "", + httpUrl = "", + blobPrefix = "", + filePath = "", + fileMode = "", + } + }, + outputFiles = new object[] + { + new + { + filePattern = "", + destination = new + { + container = new + { + path = "", + containerUrl = "", + uploadHeaders = new object[] + { + new + { + name = "", + value = "", + } + }, + }, + }, + uploadOptions = new + { + uploadCondition = "tasksuccess", + }, + } + }, + environmentSettings = new object[] + { + new + { + name = "", + value = "", + } + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + requiredSlots = 1234, + killJobOnCompletion = true, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + runExclusive = true, + applicationPackageReferences = new object[] + { + new + { + applicationId = "", + version = "", + } + }, + authenticationTokenSettings = new + { + access = new object[] + { + "job" + }, + }, + allowLowPriorityNode = true, + }, + jobPreparationTask = new + { + id = "", + commandLine = "", + resourceFiles = new object[] + { + null + }, + environmentSettings = new object[] + { + null + }, + waitForSuccess = true, + rerunOnNodeRebootAfterSuccess = true, + }, + jobReleaseTask = new + { + id 
= "", + commandLine = "", + resourceFiles = new object[] + { + null + }, + environmentSettings = new object[] + { + null + }, + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + }, + commonEnvironmentSettings = new object[] + { + null + }, + poolInfo = new + { + poolId = "", + autoPoolSpecification = new + { + autoPoolIdPrefix = "", + poolLifetimeOption = "jobschedule", + keepAlive = true, + pool = new + { + displayName = "", + vmSize = "", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "", + offer = "", + sku = "", + version = "", + virtualMachineImageId = "", + }, + nodeAgentSKUId = "", + windowsConfiguration = new + { + enableAutomaticUpdates = true, + }, + dataDisks = new object[] + { + new + { + lun = 1234, + caching = "none", + diskSizeGB = 1234, + storageAccountType = "standard_lrs", + } + }, + licenseType = "", + containerConfiguration = new + { + type = "dockerCompatible", + containerImageNames = new object[] + { + "" + }, + containerRegistries = new object[] + { + null + }, + }, + diskEncryptionConfiguration = new + { + targets = new object[] + { + "osdisk" + }, + }, + nodePlacementConfiguration = new + { + policy = "regional", + }, + extensions = new object[] + { + new + { + name = "", + publisher = "", + type = "", + typeHandlerVersion = "", + autoUpgradeMinorVersion = true, + enableAutomaticUpgrade = true, + settings = new + { + key = "", + }, + protectedSettings = new + { + key = "", + }, + provisionAfterExtensions = new object[] + { + "" + }, + } + }, + osDisk = new + { + ephemeralOSDiskSettings = new + { + placement = "cachedisk", + }, + caching = "none", + diskSizeGB = 1234, + managedDisk = new + { + storageAccountType = "standard_lrs", + }, + writeAcceleratorEnabled = true, + }, + securityProfile = new + { + encryptionAtHost = true, + securityType = "trustedLaunch", + uefiSettings = new + { + secureBootEnabled = true, + vTpmEnabled = true, + }, + }, + serviceArtifactReference = new + { + id = "", + }, + }, + taskSlotsPerNode = 1234, + taskSchedulingPolicy = new + { + nodeFillType = "spread", + }, + resizeTimeout = "PT1H23M45S", + resourceTags = "", + targetDedicatedNodes = 1234, + targetLowPriorityNodes = 1234, + enableAutoScale = true, + autoScaleFormula = "", + autoScaleEvaluationInterval = "PT1H23M45S", + enableInterNodeCommunication = true, + networkConfiguration = new + { + subnetId = "", + dynamicVNetAssignmentScope = "none", + endpointConfiguration = new + { + inboundNATPools = new object[] + { + new + { + name = "", + protocol = "tcp", + backendPort = 1234, + frontendPortRangeStart = 1234, + frontendPortRangeEnd = 1234, + networkSecurityGroupRules = new object[] + { + new + { + priority = 1234, + access = "allow", + sourceAddressPrefix = "", + sourcePortRanges = new object[] + { + "" + }, + } + }, + } + }, + }, + publicIPAddressConfiguration = new + { + provision = "batchmanaged", + ipAddressIds = new object[] + { + "" + }, + }, + enableAcceleratedNetworking = true, + }, + startTask = new + { + commandLine = "", + resourceFiles = new object[] + { + null + }, + environmentSettings = new object[] + { + null + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { + null + }, + userAccounts = new object[] + { + new + { + name = "", + password = "", + elevationLevel = "nonadmin", + linuxUserConfiguration = new + { + uid = 1234, + gid = 1234, + sshPrivateKey = "", + }, + windowsUserConfiguration = new + { + loginMode = "batch", + }, + } + }, + metadata = new 
object[] + { + new + { + name = "", + value = "", + } + }, + mountConfiguration = new object[] + { + new + { + azureBlobFileSystemConfiguration = new + { + accountName = "", + containerName = "", + accountKey = "", + sasKey = "", + blobfuseOptions = "", + relativeMountPath = "", + }, + nfsMountConfiguration = new + { + source = "", + relativeMountPath = "", + mountOptions = "", + }, + cifsMountConfiguration = new + { + username = "", + source = "", + relativeMountPath = "", + mountOptions = "", + password = "", + }, + azureFileShareConfiguration = new + { + accountName = "", + azureFileUrl = "", + accountKey = "", + relativeMountPath = "", + mountOptions = "", + }, + } + }, + targetNodeCommunicationMode = "default", + upgradePolicy = new + { + mode = "automatic", + automaticOSUpgradePolicy = new + { + disableAutomaticRollback = true, + enableAutomaticOSUpgrade = true, + useRollingUpgradePolicy = true, + osRollingUpgradeDeferral = true, + }, + rollingUpgradePolicy = new + { + enableCrossZoneUpgrade = true, + maxBatchInstancePercent = 1234, + maxUnhealthyInstancePercent = 1234, + maxUnhealthyUpgradedInstancePercent = 1234, + pauseTimeBetweenBatches = "PT1H23M45S", + prioritizeUnhealthyInstances = true, + rollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, + }, + metadata = new object[] + { + null + }, + }, + metadata = new object[] + { + null + }, +}); +Response response = client.ReplaceJobSchedule("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call DisableJobScheduleAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.DisableJobScheduleAsync(""); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call DisableJobScheduleAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.DisableJobScheduleAsync("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call DisableJobSchedule. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.DisableJobSchedule(""); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call DisableJobSchedule with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.DisableJobSchedule("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call EnableJobScheduleAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.EnableJobScheduleAsync(""); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call EnableJobScheduleAsync with all parameters. 
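+requestConditions accepts an Azure.Core RequestConditions value, so ETag preconditions such as If-Match can be attached to the call; passing null, as below, sends the request unconditionally.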
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.EnableJobScheduleAsync("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call EnableJobSchedule. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.EnableJobSchedule(""); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call EnableJobSchedule with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.EnableJobSchedule("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call TerminateJobScheduleAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.TerminateJobScheduleAsync(""); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call TerminateJobScheduleAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.TerminateJobScheduleAsync("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call TerminateJobSchedule. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.TerminateJobSchedule(""); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call TerminateJobSchedule with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.TerminateJobSchedule("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call CreateJobScheduleAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchJobScheduleCreateContent jobSchedule = new BatchJobScheduleCreateContent("", new BatchJobScheduleConfiguration(), new BatchJobSpecification(new BatchPoolInfo())); +Response response = await client.CreateJobScheduleAsync(jobSchedule); +]]> +This sample shows how to call CreateJobScheduleAsync with all parameters. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchJobScheduleCreateContent jobSchedule = new BatchJobScheduleCreateContent("", new BatchJobScheduleConfiguration +{ + DoNotRunUntil = DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), + DoNotRunAfter = DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), + StartWindow = XmlConvert.ToTimeSpan("PT1H23M45S"), + RecurrenceInterval = XmlConvert.ToTimeSpan("PT1H23M45S"), +}, new BatchJobSpecification(new BatchPoolInfo +{ + PoolId = "", + AutoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.JobSchedule) + { + AutoPoolIdPrefix = "", + KeepAlive = true, + Pool = new BatchPoolSpecification("") + { + DisplayName = "", + VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + { + Publisher = "", + Offer = "", + Sku = "", + Version = "", + VirtualMachineImageId = "", + }, "") + { + WindowsConfiguration = new WindowsConfiguration + { + EnableAutomaticUpdates = true, + }, + DataDisks = {new DataDisk(1234, 1234) + { + Caching = CachingType.None, + StorageAccountType = StorageAccountType.StandardLRS, + }}, + LicenseType = "", + ContainerConfiguration = new ContainerConfiguration(ContainerType.DockerCompatible) + { + ContainerImageNames = { "" }, + ContainerRegistries = { default }, + }, + DiskEncryptionConfiguration = new DiskEncryptionConfiguration + { + Targets = { DiskEncryptionTarget.OsDisk }, + }, + NodePlacementConfiguration = new BatchNodePlacementConfiguration + { + Policy = BatchNodePlacementPolicyType.Regional, + }, + Extensions = {new VMExtension("", "", "") + { + TypeHandlerVersion = "", + AutoUpgradeMinorVersion = true, + EnableAutomaticUpgrade = true, + Settings = + { + ["key"] = "" + }, + ProtectedSettings = + { + ["key"] = "" + }, + ProvisionAfterExtensions = {""}, + }}, + OsDisk = new OSDisk + { + EphemeralOSDiskSettings = new DiffDiskSettings + { + Placement = DiffDiskPlacement.CacheDisk, + }, + Caching = CachingType.None, + DiskSizeGB = 1234, + ManagedDisk = new ManagedDisk(StorageAccountType.StandardLRS), + WriteAcceleratorEnabled = true, + }, + SecurityProfile = new SecurityProfile(true, SecurityTypes.TrustedLaunch, new UefiSettings + { + SecureBootEnabled = true, + VTpmEnabled = true, + }), + ServiceArtifactReference = new ServiceArtifactReference(""), + }, + TaskSlotsPerNode = 1234, + TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), + ResizeTimeout = XmlConvert.ToTimeSpan("PT1H23M45S"), + ResourceTags = "", + TargetDedicatedNodes = 1234, + TargetLowPriorityNodes = 1234, + EnableAutoScale = true, + AutoScaleFormula = "", + AutoScaleEvaluationInterval = XmlConvert.ToTimeSpan("PT1H23M45S"), + EnableInterNodeCommunication = true, + NetworkConfiguration = new NetworkConfiguration + { + SubnetId = "", + DynamicVNetAssignmentScope = DynamicVNetAssignmentScope.None, + EndpointConfiguration = new BatchPoolEndpointConfiguration(new InboundNatPool[] + { + new InboundNatPool("", InboundEndpointProtocol.Tcp, 1234, 1234, 1234) + { + NetworkSecurityGroupRules = {new NetworkSecurityGroupRule(1234, NetworkSecurityGroupRuleAccess.Allow, "") + { + SourcePortRanges = {""}, + }}, + } + }), + PublicIpAddressConfiguration = new PublicIpAddressConfiguration + { + IpAddressProvisioningType = IpAddressProvisioningType.BatchManaged, + IpAddressIds = { "" }, + }, + EnableAcceleratedNetworking = true, + }, + StartTask = new BatchStartTask("") + { + ContainerSettings = default, + ResourceFiles = 
{ default }, + EnvironmentSettings = { default }, + UserIdentity = default, + MaxTaskRetryCount = 1234, + WaitForSuccess = true, + }, + ApplicationPackageReferences = { default }, + UserAccounts = {new UserAccount("", "") + { + ElevationLevel = ElevationLevel.NonAdmin, + LinuxUserConfiguration = new LinuxUserConfiguration + { + Uid = 1234, + Gid = 1234, + SshPrivateKey = "", + }, + WindowsUserConfiguration = new WindowsUserConfiguration + { + LoginMode = LoginMode.Batch, + }, + }}, + Metadata = { new MetadataItem("", "") }, + MountConfiguration = {new MountConfiguration + { + AzureBlobFileSystemConfiguration = new AzureBlobFileSystemConfiguration("", "", "") + { + AccountKey = "", + SasKey = "", + BlobfuseOptions = "", + IdentityReference = default, + }, + NfsMountConfiguration = new NfsMountConfiguration("", "") + { + MountOptions = "", + }, + CifsMountConfiguration = new CifsMountConfiguration("", "", "", "") + { + MountOptions = "", + }, + AzureFileShareConfiguration = new AzureFileShareConfiguration("", "", "", "") + { + MountOptions = "", + }, + }}, + TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, + UpgradePolicy = new UpgradePolicy(UpgradeMode.Automatic) + { + AutomaticOsUpgradePolicy = new AutomaticOsUpgradePolicy + { + DisableAutomaticRollback = true, + EnableAutomaticOsUpgrade = true, + UseRollingUpgradePolicy = true, + OsRollingUpgradeDeferral = true, + }, + RollingUpgradePolicy = new RollingUpgradePolicy + { + EnableCrossZoneUpgrade = true, + MaxBatchInstancePercent = 1234, + MaxUnhealthyInstancePercent = 1234, + MaxUnhealthyUpgradedInstancePercent = 1234, + PauseTimeBetweenBatches = XmlConvert.ToTimeSpan("PT1H23M45S"), + PrioritizeUnhealthyInstances = true, + RollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, +}) +{ + Priority = 1234, + AllowTaskPreemption = true, + MaxParallelTasks = 1234, + DisplayName = "", + UsesTaskDependencies = true, + OnAllTasksComplete = OnAllBatchTasksComplete.NoAction, + OnTaskFailure = OnBatchTaskFailure.NoAction, + NetworkConfiguration = new BatchJobNetworkConfiguration(""), + Constraints = new BatchJobConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + JobManagerTask = new BatchJobManagerTask("", "") + { + DisplayName = "", + ContainerSettings = new BatchTaskContainerSettings("") + { + ContainerRunOptions = "", + Registry = new ContainerRegistryReference + { + Username = "", + Password = "", + RegistryServer = "", + IdentityReference = new BatchNodeIdentityReference + { + ResourceId = "", + }, + }, + WorkingDirectory = ContainerWorkingDirectory.TaskWorkingDirectory, + }, + ResourceFiles = {new ResourceFile + { + AutoStorageContainerName = "", + StorageContainerUrl = "", + HttpUrl = "", + BlobPrefix = "", + FilePath = "", + FileMode = "", + IdentityReference = default, + }}, + OutputFiles = {new OutputFile("", new OutputFileDestination + { + Container = new OutputFileBlobContainerDestination("") + { + Path = "", + IdentityReference = default, + UploadHeaders = {new HttpHeader("") + { + Value = "", + }}, + }, + }, new OutputFileUploadConfig(OutputFileUploadCondition.TaskSuccess))}, + EnvironmentSettings = {new EnvironmentSetting("") + { + Value = "", + }}, + Constraints = new BatchTaskConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + RequiredSlots = 1234, + KillJobOnCompletion = true, + UserIdentity = new UserIdentity + { + Username = "", + 
AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, + RunExclusive = true, + ApplicationPackageReferences = {new BatchApplicationPackageReference("") + { + Version = "", + }}, + AuthenticationTokenSettings = new AuthenticationTokenSettings + { + Access = { AccessScope.Job }, + }, + AllowLowPriorityNode = true, + }, + JobPreparationTask = new BatchJobPreparationTask("") + { + Id = "", + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + Constraints = default, + WaitForSuccess = true, + UserIdentity = default, + RerunOnNodeRebootAfterSuccess = true, + }, + JobReleaseTask = new BatchJobReleaseTask("") + { + Id = "", + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + UserIdentity = default, + }, + CommonEnvironmentSettings = { default }, + Metadata = { default }, +}) +{ + DisplayName = "", + Metadata = { default }, +}; +Response response = await client.CreateJobScheduleAsync(jobSchedule, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); +]]> + + + +This sample shows how to call CreateJobSchedule. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchJobScheduleCreateContent jobSchedule = new BatchJobScheduleCreateContent("", new BatchJobScheduleConfiguration(), new BatchJobSpecification(new BatchPoolInfo())); +Response response = client.CreateJobSchedule(jobSchedule); +]]> +This sample shows how to call CreateJobSchedule with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchJobScheduleCreateContent jobSchedule = new BatchJobScheduleCreateContent("", new BatchJobScheduleConfiguration +{ + DoNotRunUntil = DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), + DoNotRunAfter = DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), + StartWindow = XmlConvert.ToTimeSpan("PT1H23M45S"), + RecurrenceInterval = XmlConvert.ToTimeSpan("PT1H23M45S"), +}, new BatchJobSpecification(new BatchPoolInfo +{ + PoolId = "", + AutoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.JobSchedule) + { + AutoPoolIdPrefix = "", + KeepAlive = true, + Pool = new BatchPoolSpecification("") + { + DisplayName = "", + VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + { + Publisher = "", + Offer = "", + Sku = "", + Version = "", + VirtualMachineImageId = "", + }, "") + { + WindowsConfiguration = new WindowsConfiguration + { + EnableAutomaticUpdates = true, + }, + DataDisks = {new DataDisk(1234, 1234) + { + Caching = CachingType.None, + StorageAccountType = StorageAccountType.StandardLRS, + }}, + LicenseType = "", + ContainerConfiguration = new ContainerConfiguration(ContainerType.DockerCompatible) + { + ContainerImageNames = { "" }, + ContainerRegistries = { default }, + }, + DiskEncryptionConfiguration = new DiskEncryptionConfiguration + { + Targets = { DiskEncryptionTarget.OsDisk }, + }, + NodePlacementConfiguration = new BatchNodePlacementConfiguration + { + Policy = BatchNodePlacementPolicyType.Regional, + }, + Extensions = {new VMExtension("", "", "") + { + TypeHandlerVersion = "", + AutoUpgradeMinorVersion = true, + EnableAutomaticUpgrade = true, + Settings = + { + ["key"] 
= "" + }, + ProtectedSettings = + { + ["key"] = "" + }, + ProvisionAfterExtensions = {""}, + }}, + OsDisk = new OSDisk + { + EphemeralOSDiskSettings = new DiffDiskSettings + { + Placement = DiffDiskPlacement.CacheDisk, + }, + Caching = CachingType.None, + DiskSizeGB = 1234, + ManagedDisk = new ManagedDisk(StorageAccountType.StandardLRS), + WriteAcceleratorEnabled = true, + }, + SecurityProfile = new SecurityProfile(true, SecurityTypes.TrustedLaunch, new UefiSettings + { + SecureBootEnabled = true, + VTpmEnabled = true, + }), + ServiceArtifactReference = new ServiceArtifactReference(""), + }, + TaskSlotsPerNode = 1234, + TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), + ResizeTimeout = XmlConvert.ToTimeSpan("PT1H23M45S"), + ResourceTags = "", + TargetDedicatedNodes = 1234, + TargetLowPriorityNodes = 1234, + EnableAutoScale = true, + AutoScaleFormula = "", + AutoScaleEvaluationInterval = XmlConvert.ToTimeSpan("PT1H23M45S"), + EnableInterNodeCommunication = true, + NetworkConfiguration = new NetworkConfiguration + { + SubnetId = "", + DynamicVNetAssignmentScope = DynamicVNetAssignmentScope.None, + EndpointConfiguration = new BatchPoolEndpointConfiguration(new InboundNatPool[] + { + new InboundNatPool("", InboundEndpointProtocol.Tcp, 1234, 1234, 1234) + { + NetworkSecurityGroupRules = {new NetworkSecurityGroupRule(1234, NetworkSecurityGroupRuleAccess.Allow, "") + { + SourcePortRanges = {""}, + }}, + } + }), + PublicIpAddressConfiguration = new PublicIpAddressConfiguration + { + IpAddressProvisioningType = IpAddressProvisioningType.BatchManaged, + IpAddressIds = { "" }, + }, + EnableAcceleratedNetworking = true, + }, + StartTask = new BatchStartTask("") + { + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + UserIdentity = default, + MaxTaskRetryCount = 1234, + WaitForSuccess = true, + }, + ApplicationPackageReferences = { default }, + UserAccounts = {new UserAccount("", "") + { + ElevationLevel = ElevationLevel.NonAdmin, + LinuxUserConfiguration = new LinuxUserConfiguration + { + Uid = 1234, + Gid = 1234, + SshPrivateKey = "", + }, + WindowsUserConfiguration = new WindowsUserConfiguration + { + LoginMode = LoginMode.Batch, + }, + }}, + Metadata = { new MetadataItem("", "") }, + MountConfiguration = {new MountConfiguration + { + AzureBlobFileSystemConfiguration = new AzureBlobFileSystemConfiguration("", "", "") + { + AccountKey = "", + SasKey = "", + BlobfuseOptions = "", + IdentityReference = default, + }, + NfsMountConfiguration = new NfsMountConfiguration("", "") + { + MountOptions = "", + }, + CifsMountConfiguration = new CifsMountConfiguration("", "", "", "") + { + MountOptions = "", + }, + AzureFileShareConfiguration = new AzureFileShareConfiguration("", "", "", "") + { + MountOptions = "", + }, + }}, + TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, + UpgradePolicy = new UpgradePolicy(UpgradeMode.Automatic) + { + AutomaticOsUpgradePolicy = new AutomaticOsUpgradePolicy + { + DisableAutomaticRollback = true, + EnableAutomaticOsUpgrade = true, + UseRollingUpgradePolicy = true, + OsRollingUpgradeDeferral = true, + }, + RollingUpgradePolicy = new RollingUpgradePolicy + { + EnableCrossZoneUpgrade = true, + MaxBatchInstancePercent = 1234, + MaxUnhealthyInstancePercent = 1234, + MaxUnhealthyUpgradedInstancePercent = 1234, + PauseTimeBetweenBatches = XmlConvert.ToTimeSpan("PT1H23M45S"), + PrioritizeUnhealthyInstances = true, + RollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, +}) +{ 
+ Priority = 1234, + AllowTaskPreemption = true, + MaxParallelTasks = 1234, + DisplayName = "", + UsesTaskDependencies = true, + OnAllTasksComplete = OnAllBatchTasksComplete.NoAction, + OnTaskFailure = OnBatchTaskFailure.NoAction, + NetworkConfiguration = new BatchJobNetworkConfiguration(""), + Constraints = new BatchJobConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + JobManagerTask = new BatchJobManagerTask("", "") + { + DisplayName = "", + ContainerSettings = new BatchTaskContainerSettings("") + { + ContainerRunOptions = "", + Registry = new ContainerRegistryReference + { + Username = "", + Password = "", + RegistryServer = "", + IdentityReference = new BatchNodeIdentityReference + { + ResourceId = "", + }, + }, + WorkingDirectory = ContainerWorkingDirectory.TaskWorkingDirectory, + }, + ResourceFiles = {new ResourceFile + { + AutoStorageContainerName = "", + StorageContainerUrl = "", + HttpUrl = "", + BlobPrefix = "", + FilePath = "", + FileMode = "", + IdentityReference = default, + }}, + OutputFiles = {new OutputFile("", new OutputFileDestination + { + Container = new OutputFileBlobContainerDestination("") + { + Path = "", + IdentityReference = default, + UploadHeaders = {new HttpHeader("") + { + Value = "", + }}, + }, + }, new OutputFileUploadConfig(OutputFileUploadCondition.TaskSuccess))}, + EnvironmentSettings = {new EnvironmentSetting("") + { + Value = "", + }}, + Constraints = new BatchTaskConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + RequiredSlots = 1234, + KillJobOnCompletion = true, + UserIdentity = new UserIdentity + { + Username = "", + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, + RunExclusive = true, + ApplicationPackageReferences = {new BatchApplicationPackageReference("") + { + Version = "", + }}, + AuthenticationTokenSettings = new AuthenticationTokenSettings + { + Access = { AccessScope.Job }, + }, + AllowLowPriorityNode = true, + }, + JobPreparationTask = new BatchJobPreparationTask("") + { + Id = "", + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + Constraints = default, + WaitForSuccess = true, + UserIdentity = default, + RerunOnNodeRebootAfterSuccess = true, + }, + JobReleaseTask = new BatchJobReleaseTask("") + { + Id = "", + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + UserIdentity = default, + }, + CommonEnvironmentSettings = { default }, + Metadata = { default }, +}) +{ + DisplayName = "", + Metadata = { default }, +}; +Response response = client.CreateJobSchedule(jobSchedule, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); +]]> + + + +This sample shows how to call CreateJobScheduleAsync. 
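Note: the overloads below accept raw RequestContent rather than model types. RequestContent.Create(object) serializes the anonymous object to JSON with the library's default System.Text.Json-based serializer, so the minimal body shown next is equivalent to the literal produced by this sketch (assuming default serializer settings):

string json = System.Text.Json.JsonSerializer.Serialize(new
{
    id = "",
    schedule = new object(),
    jobSpecification = new { poolInfo = new object() },
});
// json == {"id":"","schedule":{},"jobSpecification":{"poolInfo":{}}}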
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + id = "", + schedule = new object(), + jobSpecification = new + { + poolInfo = new object(), + }, +}); +Response response = await client.CreateJobScheduleAsync(content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call CreateJobScheduleAsync with all parameters and request content. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + id = "", + displayName = "", + schedule = new + { + doNotRunUntil = "2022-05-10T18:57:31.2311892Z", + doNotRunAfter = "2022-05-10T18:57:31.2311892Z", + startWindow = "PT1H23M45S", + recurrenceInterval = "PT1H23M45S", + }, + jobSpecification = new + { + priority = 1234, + allowTaskPreemption = true, + maxParallelTasks = 1234, + displayName = "", + usesTaskDependencies = true, + onAllTasksComplete = "noaction", + onTaskFailure = "noaction", + networkConfiguration = new + { + subnetId = "", + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + jobManagerTask = new + { + id = "", + displayName = "", + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + registry = new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + }, + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { + new + { + autoStorageContainerName = "", + storageContainerUrl = "", + httpUrl = "", + blobPrefix = "", + filePath = "", + fileMode = "", + } + }, + outputFiles = new object[] + { + new + { + filePattern = "", + destination = new + { + container = new + { + path = "", + containerUrl = "", + uploadHeaders = new object[] + { + new + { + name = "", + value = "", + } + }, + }, + }, + uploadOptions = new + { + uploadCondition = "tasksuccess", + }, + } + }, + environmentSettings = new object[] + { + new + { + name = "", + value = "", + } + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + requiredSlots = 1234, + killJobOnCompletion = true, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + runExclusive = true, + applicationPackageReferences = new object[] + { + new + { + applicationId = "", + version = "", + } + }, + authenticationTokenSettings = new + { + access = new object[] + { + "job" + }, + }, + allowLowPriorityNode = true, + }, + jobPreparationTask = new + { + id = "", + commandLine = "", + resourceFiles = new object[] + { + null + }, + environmentSettings = new object[] + { + null + }, + waitForSuccess = true, + rerunOnNodeRebootAfterSuccess = true, + }, + jobReleaseTask = new + { + id = "", + commandLine = "", + resourceFiles = new object[] + { + null + }, + environmentSettings = new object[] + { + null + }, + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + }, + commonEnvironmentSettings = new object[] + { + null + }, + poolInfo = new + { + poolId = "", + autoPoolSpecification = new + { + autoPoolIdPrefix = "", + poolLifetimeOption = "jobschedule", + keepAlive = true, + pool = new + { + displayName = "", + vmSize = "", + virtualMachineConfiguration = new + { + imageReference = new + { + 
publisher = "", + offer = "", + sku = "", + version = "", + virtualMachineImageId = "", + }, + nodeAgentSKUId = "", + windowsConfiguration = new + { + enableAutomaticUpdates = true, + }, + dataDisks = new object[] + { + new + { + lun = 1234, + caching = "none", + diskSizeGB = 1234, + storageAccountType = "standard_lrs", + } + }, + licenseType = "", + containerConfiguration = new + { + type = "dockerCompatible", + containerImageNames = new object[] + { + "" + }, + containerRegistries = new object[] + { + null + }, + }, + diskEncryptionConfiguration = new + { + targets = new object[] + { + "osdisk" + }, + }, + nodePlacementConfiguration = new + { + policy = "regional", + }, + extensions = new object[] + { + new + { + name = "", + publisher = "", + type = "", + typeHandlerVersion = "", + autoUpgradeMinorVersion = true, + enableAutomaticUpgrade = true, + settings = new + { + key = "", + }, + protectedSettings = new + { + key = "", + }, + provisionAfterExtensions = new object[] + { + "" + }, + } + }, + osDisk = new + { + ephemeralOSDiskSettings = new + { + placement = "cachedisk", + }, + caching = "none", + diskSizeGB = 1234, + managedDisk = new + { + storageAccountType = "standard_lrs", + }, + writeAcceleratorEnabled = true, + }, + securityProfile = new + { + encryptionAtHost = true, + securityType = "trustedLaunch", + uefiSettings = new + { + secureBootEnabled = true, + vTpmEnabled = true, + }, + }, + serviceArtifactReference = new + { + id = "", + }, + }, + taskSlotsPerNode = 1234, + taskSchedulingPolicy = new + { + nodeFillType = "spread", + }, + resizeTimeout = "PT1H23M45S", + resourceTags = "", + targetDedicatedNodes = 1234, + targetLowPriorityNodes = 1234, + enableAutoScale = true, + autoScaleFormula = "", + autoScaleEvaluationInterval = "PT1H23M45S", + enableInterNodeCommunication = true, + networkConfiguration = new + { + subnetId = "", + dynamicVNetAssignmentScope = "none", + endpointConfiguration = new + { + inboundNATPools = new object[] + { + new + { + name = "", + protocol = "tcp", + backendPort = 1234, + frontendPortRangeStart = 1234, + frontendPortRangeEnd = 1234, + networkSecurityGroupRules = new object[] + { + new + { + priority = 1234, + access = "allow", + sourceAddressPrefix = "", + sourcePortRanges = new object[] + { + "" + }, + } + }, + } + }, + }, + publicIPAddressConfiguration = new + { + provision = "batchmanaged", + ipAddressIds = new object[] + { + "" + }, + }, + enableAcceleratedNetworking = true, + }, + startTask = new + { + commandLine = "", + resourceFiles = new object[] + { + null + }, + environmentSettings = new object[] + { + null + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { + null + }, + userAccounts = new object[] + { + new + { + name = "", + password = "", + elevationLevel = "nonadmin", + linuxUserConfiguration = new + { + uid = 1234, + gid = 1234, + sshPrivateKey = "", + }, + windowsUserConfiguration = new + { + loginMode = "batch", + }, + } + }, + metadata = new object[] + { + new + { + name = "", + value = "", + } + }, + mountConfiguration = new object[] + { + new + { + azureBlobFileSystemConfiguration = new + { + accountName = "", + containerName = "", + accountKey = "", + sasKey = "", + blobfuseOptions = "", + relativeMountPath = "", + }, + nfsMountConfiguration = new + { + source = "", + relativeMountPath = "", + mountOptions = "", + }, + cifsMountConfiguration = new + { + username = "", + source = "", + relativeMountPath = "", + mountOptions = "", + password = "", + }, + 
azureFileShareConfiguration = new + { + accountName = "", + azureFileUrl = "", + accountKey = "", + relativeMountPath = "", + mountOptions = "", + }, + } + }, + targetNodeCommunicationMode = "default", + upgradePolicy = new + { + mode = "automatic", + automaticOSUpgradePolicy = new + { + disableAutomaticRollback = true, + enableAutomaticOSUpgrade = true, + useRollingUpgradePolicy = true, + osRollingUpgradeDeferral = true, + }, + rollingUpgradePolicy = new + { + enableCrossZoneUpgrade = true, + maxBatchInstancePercent = 1234, + maxUnhealthyInstancePercent = 1234, + maxUnhealthyUpgradedInstancePercent = 1234, + pauseTimeBetweenBatches = "PT1H23M45S", + prioritizeUnhealthyInstances = true, + rollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, + }, + metadata = new object[] + { + null + }, + }, + metadata = new object[] + { + null + }, +}); +Response response = await client.CreateJobScheduleAsync(content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call CreateJobSchedule. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + id = "", + schedule = new object(), + jobSpecification = new + { + poolInfo = new object(), + }, +}); +Response response = client.CreateJobSchedule(content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call CreateJobSchedule with all parameters and request content. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + id = "", + displayName = "", + schedule = new + { + doNotRunUntil = "2022-05-10T18:57:31.2311892Z", + doNotRunAfter = "2022-05-10T18:57:31.2311892Z", + startWindow = "PT1H23M45S", + recurrenceInterval = "PT1H23M45S", + }, + jobSpecification = new + { + priority = 1234, + allowTaskPreemption = true, + maxParallelTasks = 1234, + displayName = "", + usesTaskDependencies = true, + onAllTasksComplete = "noaction", + onTaskFailure = "noaction", + networkConfiguration = new + { + subnetId = "", + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + jobManagerTask = new + { + id = "", + displayName = "", + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + registry = new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + }, + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { + new + { + autoStorageContainerName = "", + storageContainerUrl = "", + httpUrl = "", + blobPrefix = "", + filePath = "", + fileMode = "", + } + }, + outputFiles = new object[] + { + new + { + filePattern = "", + destination = new + { + container = new + { + path = "", + containerUrl = "", + uploadHeaders = new object[] + { + new + { + name = "", + value = "", + } + }, + }, + }, + uploadOptions = new + { + uploadCondition = "tasksuccess", + }, + } + }, + environmentSettings = new object[] + { + new + { + name = "", + value = "", + } + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + requiredSlots = 1234, + killJobOnCompletion = true, + userIdentity = new + { + username = "", + autoUser = new + { + scope = 
"task", + elevationLevel = "nonadmin", + }, + }, + runExclusive = true, + applicationPackageReferences = new object[] + { + new + { + applicationId = "", + version = "", + } + }, + authenticationTokenSettings = new + { + access = new object[] + { + "job" + }, + }, + allowLowPriorityNode = true, + }, + jobPreparationTask = new + { + id = "", + commandLine = "", + resourceFiles = new object[] + { + null + }, + environmentSettings = new object[] + { + null + }, + waitForSuccess = true, + rerunOnNodeRebootAfterSuccess = true, + }, + jobReleaseTask = new + { + id = "", + commandLine = "", + resourceFiles = new object[] + { + null + }, + environmentSettings = new object[] + { + null + }, + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + }, + commonEnvironmentSettings = new object[] + { + null + }, + poolInfo = new + { + poolId = "", + autoPoolSpecification = new + { + autoPoolIdPrefix = "", + poolLifetimeOption = "jobschedule", + keepAlive = true, + pool = new + { + displayName = "", + vmSize = "", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "", + offer = "", + sku = "", + version = "", + virtualMachineImageId = "", + }, + nodeAgentSKUId = "", + windowsConfiguration = new + { + enableAutomaticUpdates = true, + }, + dataDisks = new object[] + { + new + { + lun = 1234, + caching = "none", + diskSizeGB = 1234, + storageAccountType = "standard_lrs", + } + }, + licenseType = "", + containerConfiguration = new + { + type = "dockerCompatible", + containerImageNames = new object[] + { + "" + }, + containerRegistries = new object[] + { + null + }, + }, + diskEncryptionConfiguration = new + { + targets = new object[] + { + "osdisk" + }, + }, + nodePlacementConfiguration = new + { + policy = "regional", + }, + extensions = new object[] + { + new + { + name = "", + publisher = "", + type = "", + typeHandlerVersion = "", + autoUpgradeMinorVersion = true, + enableAutomaticUpgrade = true, + settings = new + { + key = "", + }, + protectedSettings = new + { + key = "", + }, + provisionAfterExtensions = new object[] + { + "" + }, + } + }, + osDisk = new + { + ephemeralOSDiskSettings = new + { + placement = "cachedisk", + }, + caching = "none", + diskSizeGB = 1234, + managedDisk = new + { + storageAccountType = "standard_lrs", + }, + writeAcceleratorEnabled = true, + }, + securityProfile = new + { + encryptionAtHost = true, + securityType = "trustedLaunch", + uefiSettings = new + { + secureBootEnabled = true, + vTpmEnabled = true, + }, + }, + serviceArtifactReference = new + { + id = "", + }, + }, + taskSlotsPerNode = 1234, + taskSchedulingPolicy = new + { + nodeFillType = "spread", + }, + resizeTimeout = "PT1H23M45S", + resourceTags = "", + targetDedicatedNodes = 1234, + targetLowPriorityNodes = 1234, + enableAutoScale = true, + autoScaleFormula = "", + autoScaleEvaluationInterval = "PT1H23M45S", + enableInterNodeCommunication = true, + networkConfiguration = new + { + subnetId = "", + dynamicVNetAssignmentScope = "none", + endpointConfiguration = new + { + inboundNATPools = new object[] + { + new + { + name = "", + protocol = "tcp", + backendPort = 1234, + frontendPortRangeStart = 1234, + frontendPortRangeEnd = 1234, + networkSecurityGroupRules = new object[] + { + new + { + priority = 1234, + access = "allow", + sourceAddressPrefix = "", + sourcePortRanges = new object[] + { + "" + }, + } + }, + } + }, + }, + publicIPAddressConfiguration = new + { + provision = "batchmanaged", + ipAddressIds = new object[] + { + "" + }, + }, + enableAcceleratedNetworking = 
true, + }, + startTask = new + { + commandLine = "", + resourceFiles = new object[] + { + null + }, + environmentSettings = new object[] + { + null + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { + null + }, + userAccounts = new object[] + { + new + { + name = "", + password = "", + elevationLevel = "nonadmin", + linuxUserConfiguration = new + { + uid = 1234, + gid = 1234, + sshPrivateKey = "", + }, + windowsUserConfiguration = new + { + loginMode = "batch", + }, + } + }, + metadata = new object[] + { + new + { + name = "", + value = "", + } + }, + mountConfiguration = new object[] + { + new + { + azureBlobFileSystemConfiguration = new + { + accountName = "", + containerName = "", + accountKey = "", + sasKey = "", + blobfuseOptions = "", + relativeMountPath = "", + }, + nfsMountConfiguration = new + { + source = "", + relativeMountPath = "", + mountOptions = "", + }, + cifsMountConfiguration = new + { + username = "", + source = "", + relativeMountPath = "", + mountOptions = "", + password = "", + }, + azureFileShareConfiguration = new + { + accountName = "", + azureFileUrl = "", + accountKey = "", + relativeMountPath = "", + mountOptions = "", + }, + } + }, + targetNodeCommunicationMode = "default", + upgradePolicy = new + { + mode = "automatic", + automaticOSUpgradePolicy = new + { + disableAutomaticRollback = true, + enableAutomaticOSUpgrade = true, + useRollingUpgradePolicy = true, + osRollingUpgradeDeferral = true, + }, + rollingUpgradePolicy = new + { + enableCrossZoneUpgrade = true, + maxBatchInstancePercent = 1234, + maxUnhealthyInstancePercent = 1234, + maxUnhealthyUpgradedInstancePercent = 1234, + pauseTimeBetweenBatches = "PT1H23M45S", + prioritizeUnhealthyInstances = true, + rollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, + }, + metadata = new object[] + { + null + }, + }, + metadata = new object[] + { + null + }, +}); +Response response = client.CreateJobSchedule(content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call CreateTaskAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchTaskCreateContent task = new BatchTaskCreateContent("", ""); +Response response = await client.CreateTaskAsync("", task); +]]> +This sample shows how to call CreateTaskAsync with all parameters. 
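Note: the two required BatchTaskCreateContent constructor arguments are the task id and its command line. A concrete call might look like the following sketch, where "job-1", "task-1", and the echo command are purely illustrative:

BatchTaskCreateContent task = new BatchTaskCreateContent("task-1", "cmd /c echo hello"); // id, commandLine
Response response = await client.CreateTaskAsync("job-1", task);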
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchTaskCreateContent task = new BatchTaskCreateContent("", "") +{ + DisplayName = "", + ExitConditions = new ExitConditions + { + ExitCodes = {new ExitCodeMapping(1234, new ExitOptions + { + JobAction = BatchJobAction.None, + DependencyAction = DependencyAction.Satisfy, + })}, + ExitCodeRanges = { new ExitCodeRangeMapping(1234, 1234, default) }, + PreProcessingError = default, + FileUploadError = default, + Default = default, + }, + ContainerSettings = new BatchTaskContainerSettings("") + { + ContainerRunOptions = "", + Registry = new ContainerRegistryReference + { + Username = "", + Password = "", + RegistryServer = "", + IdentityReference = new BatchNodeIdentityReference + { + ResourceId = "", + }, + }, + WorkingDirectory = ContainerWorkingDirectory.TaskWorkingDirectory, + }, + ResourceFiles = {new ResourceFile + { + AutoStorageContainerName = "", + StorageContainerUrl = "", + HttpUrl = "", + BlobPrefix = "", + FilePath = "", + FileMode = "", + IdentityReference = default, + }}, + OutputFiles = {new OutputFile("", new OutputFileDestination + { + Container = new OutputFileBlobContainerDestination("") + { + Path = "", + IdentityReference = default, + UploadHeaders = {new HttpHeader("") + { + Value = "", + }}, + }, + }, new OutputFileUploadConfig(OutputFileUploadCondition.TaskSuccess))}, + EnvironmentSettings = {new EnvironmentSetting("") + { + Value = "", + }}, + AffinityInfo = new AffinityInfo(""), + Constraints = new BatchTaskConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + RequiredSlots = 1234, + UserIdentity = new UserIdentity + { + Username = "", + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, + MultiInstanceSettings = new MultiInstanceSettings("") + { + NumberOfInstances = 1234, + CommonResourceFiles = { default }, + }, + DependsOn = new BatchTaskDependencies + { + TaskIds = { "" }, + TaskIdRanges = { new BatchTaskIdRange(1234, 1234) }, + }, + ApplicationPackageReferences = {new BatchApplicationPackageReference("") + { + Version = "", + }}, + AuthenticationTokenSettings = new AuthenticationTokenSettings + { + Access = { AccessScope.Job }, + }, +}; +Response response = await client.CreateTaskAsync("", task, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); +]]> + + + +This sample shows how to call CreateTask. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchTaskCreateContent task = new BatchTaskCreateContent("", ""); +Response response = client.CreateTask("", task); +]]> +This sample shows how to call CreateTask with all parameters. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchTaskCreateContent task = new BatchTaskCreateContent("", "") +{ + DisplayName = "", + ExitConditions = new ExitConditions + { + ExitCodes = {new ExitCodeMapping(1234, new ExitOptions + { + JobAction = BatchJobAction.None, + DependencyAction = DependencyAction.Satisfy, + })}, + ExitCodeRanges = { new ExitCodeRangeMapping(1234, 1234, default) }, + PreProcessingError = default, + FileUploadError = default, + Default = default, + }, + ContainerSettings = new BatchTaskContainerSettings("") + { + ContainerRunOptions = "", + Registry = new ContainerRegistryReference + { + Username = "", + Password = "", + RegistryServer = "", + IdentityReference = new BatchNodeIdentityReference + { + ResourceId = "", + }, + }, + WorkingDirectory = ContainerWorkingDirectory.TaskWorkingDirectory, + }, + ResourceFiles = {new ResourceFile + { + AutoStorageContainerName = "", + StorageContainerUrl = "", + HttpUrl = "", + BlobPrefix = "", + FilePath = "", + FileMode = "", + IdentityReference = default, + }}, + OutputFiles = {new OutputFile("", new OutputFileDestination + { + Container = new OutputFileBlobContainerDestination("") + { + Path = "", + IdentityReference = default, + UploadHeaders = {new HttpHeader("") + { + Value = "", + }}, + }, + }, new OutputFileUploadConfig(OutputFileUploadCondition.TaskSuccess))}, + EnvironmentSettings = {new EnvironmentSetting("") + { + Value = "", + }}, + AffinityInfo = new AffinityInfo(""), + Constraints = new BatchTaskConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + RequiredSlots = 1234, + UserIdentity = new UserIdentity + { + Username = "", + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, + MultiInstanceSettings = new MultiInstanceSettings("") + { + NumberOfInstances = 1234, + CommonResourceFiles = { default }, + }, + DependsOn = new BatchTaskDependencies + { + TaskIds = { "" }, + TaskIdRanges = { new BatchTaskIdRange(1234, 1234) }, + }, + ApplicationPackageReferences = {new BatchApplicationPackageReference("") + { + Version = "", + }}, + AuthenticationTokenSettings = new AuthenticationTokenSettings + { + Access = { AccessScope.Job }, + }, +}; +Response response = client.CreateTask("", task, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); +]]> + + + +This sample shows how to call CreateTaskAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + id = "", + commandLine = "", +}); +Response response = await client.CreateTaskAsync("", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call CreateTaskAsync with all parameters and request content. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + id = "", + displayName = "", + exitConditions = new Dictionary + { + ["exitCodes"] = new object[] + { + new + { + code = 1234, + exitOptions = new + { + jobAction = "none", + dependencyAction = "satisfy", + }, + } + }, + ["exitCodeRanges"] = new object[] + { + new + { + start = 1234, + end = 1234, + } + }, + ["preProcessingError"] = null, + ["fileUploadError"] = null, + ["default"] = null + }, + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + registry = new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + }, + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { + new + { + autoStorageContainerName = "", + storageContainerUrl = "", + httpUrl = "", + blobPrefix = "", + filePath = "", + fileMode = "", + } + }, + outputFiles = new object[] + { + new + { + filePattern = "", + destination = new + { + container = new + { + path = "", + containerUrl = "", + uploadHeaders = new object[] + { + new + { + name = "", + value = "", + } + }, + }, + }, + uploadOptions = new + { + uploadCondition = "tasksuccess", + }, + } + }, + environmentSettings = new object[] + { + new + { + name = "", + value = "", + } + }, + affinityInfo = new + { + affinityId = "", + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + requiredSlots = 1234, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + multiInstanceSettings = new + { + numberOfInstances = 1234, + coordinationCommandLine = "", + commonResourceFiles = new object[] + { + null + }, + }, + dependsOn = new + { + taskIds = new object[] + { + "" + }, + taskIdRanges = new object[] + { + new + { + start = 1234, + end = 1234, + } + }, + }, + applicationPackageReferences = new object[] + { + new + { + applicationId = "", + version = "", + } + }, + authenticationTokenSettings = new + { + access = new object[] + { + "job" + }, + }, +}); +Response response = await client.CreateTaskAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call CreateTask. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + id = "", + commandLine = "", +}); +Response response = client.CreateTask("", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call CreateTask with all parameters and request content. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + id = "", + displayName = "", + exitConditions = new Dictionary + { + ["exitCodes"] = new object[] + { + new + { + code = 1234, + exitOptions = new + { + jobAction = "none", + dependencyAction = "satisfy", + }, + } + }, + ["exitCodeRanges"] = new object[] + { + new + { + start = 1234, + end = 1234, + } + }, + ["preProcessingError"] = null, + ["fileUploadError"] = null, + ["default"] = null + }, + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + registry = new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + }, + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { + new + { + autoStorageContainerName = "", + storageContainerUrl = "", + httpUrl = "", + blobPrefix = "", + filePath = "", + fileMode = "", + } + }, + outputFiles = new object[] + { + new + { + filePattern = "", + destination = new + { + container = new + { + path = "", + containerUrl = "", + uploadHeaders = new object[] + { + new + { + name = "", + value = "", + } + }, + }, + }, + uploadOptions = new + { + uploadCondition = "tasksuccess", + }, + } + }, + environmentSettings = new object[] + { + new + { + name = "", + value = "", + } + }, + affinityInfo = new + { + affinityId = "", + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + requiredSlots = 1234, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + multiInstanceSettings = new + { + numberOfInstances = 1234, + coordinationCommandLine = "", + commonResourceFiles = new object[] + { + null + }, + }, + dependsOn = new + { + taskIds = new object[] + { + "" + }, + taskIdRanges = new object[] + { + new + { + start = 1234, + end = 1234, + } + }, + }, + applicationPackageReferences = new object[] + { + new + { + applicationId = "", + version = "", + } + }, + authenticationTokenSettings = new + { + access = new object[] + { + "job" + }, + }, +}); +Response response = client.CreateTask("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call CreateTaskCollectionAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchTaskGroup taskCollection = new BatchTaskGroup(new BatchTaskCreateContent[] +{ + new BatchTaskCreateContent("", "") +}); +Response response = await client.CreateTaskCollectionAsync("", taskCollection); +]]> +This sample shows how to call CreateTaskCollectionAsync with all parameters. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchTaskGroup taskCollection = new BatchTaskGroup(new BatchTaskCreateContent[] +{ + new BatchTaskCreateContent("", "") + { + DisplayName = "", + ExitConditions = new ExitConditions + { + ExitCodes = {new ExitCodeMapping(1234, new ExitOptions + { + JobAction = BatchJobAction.None, + DependencyAction = DependencyAction.Satisfy, + })}, + ExitCodeRanges = {new ExitCodeRangeMapping(1234, 1234, default)}, + PreProcessingError = default, + FileUploadError = default, + Default = default, + }, + ContainerSettings = new BatchTaskContainerSettings("") + { + ContainerRunOptions = "", + Registry = new ContainerRegistryReference + { + Username = "", + Password = "", + RegistryServer = "", + IdentityReference = new BatchNodeIdentityReference + { + ResourceId = "", + }, + }, + WorkingDirectory = ContainerWorkingDirectory.TaskWorkingDirectory, + }, + ResourceFiles = {new ResourceFile + { + AutoStorageContainerName = "", + StorageContainerUrl = "", + HttpUrl = "", + BlobPrefix = "", + FilePath = "", + FileMode = "", + IdentityReference = default, + }}, + OutputFiles = {new OutputFile("", new OutputFileDestination + { + Container = new OutputFileBlobContainerDestination("") + { + Path = "", + IdentityReference = default, + UploadHeaders = {new HttpHeader("") + { + Value = "", + }}, + }, + }, new OutputFileUploadConfig(OutputFileUploadCondition.TaskSuccess))}, + EnvironmentSettings = {new EnvironmentSetting("") + { + Value = "", + }}, + AffinityInfo = new AffinityInfo(""), + Constraints = new BatchTaskConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + RequiredSlots = 1234, + UserIdentity = new UserIdentity + { + Username = "", + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, + MultiInstanceSettings = new MultiInstanceSettings("") + { + NumberOfInstances = 1234, + CommonResourceFiles = {default}, + }, + DependsOn = new BatchTaskDependencies + { + TaskIds = {""}, + TaskIdRanges = {new BatchTaskIdRange(1234, 1234)}, + }, + ApplicationPackageReferences = {new BatchApplicationPackageReference("") + { + Version = "", + }}, + AuthenticationTokenSettings = new AuthenticationTokenSettings + { + Access = {AccessScope.Job}, + }, + } +}); +Response response = await client.CreateTaskCollectionAsync("", taskCollection, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); +]]> + + + +This sample shows how to call CreateTaskCollection. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchTaskGroup taskCollection = new BatchTaskGroup(new BatchTaskCreateContent[] +{ + new BatchTaskCreateContent("", "") +}); +Response response = client.CreateTaskCollection("", taskCollection); +]]> +This sample shows how to call CreateTaskCollection with all parameters. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchTaskGroup taskCollection = new BatchTaskGroup(new BatchTaskCreateContent[] +{ + new BatchTaskCreateContent("", "") + { + DisplayName = "", + ExitConditions = new ExitConditions + { + ExitCodes = {new ExitCodeMapping(1234, new ExitOptions + { + JobAction = BatchJobAction.None, + DependencyAction = DependencyAction.Satisfy, + })}, + ExitCodeRanges = {new ExitCodeRangeMapping(1234, 1234, default)}, + PreProcessingError = default, + FileUploadError = default, + Default = default, + }, + ContainerSettings = new BatchTaskContainerSettings("") + { + ContainerRunOptions = "", + Registry = new ContainerRegistryReference + { + Username = "", + Password = "", + RegistryServer = "", + IdentityReference = new BatchNodeIdentityReference + { + ResourceId = "", + }, + }, + WorkingDirectory = ContainerWorkingDirectory.TaskWorkingDirectory, + }, + ResourceFiles = {new ResourceFile + { + AutoStorageContainerName = "", + StorageContainerUrl = "", + HttpUrl = "", + BlobPrefix = "", + FilePath = "", + FileMode = "", + IdentityReference = default, + }}, + OutputFiles = {new OutputFile("", new OutputFileDestination + { + Container = new OutputFileBlobContainerDestination("") + { + Path = "", + IdentityReference = default, + UploadHeaders = {new HttpHeader("") + { + Value = "", + }}, + }, + }, new OutputFileUploadConfig(OutputFileUploadCondition.TaskSuccess))}, + EnvironmentSettings = {new EnvironmentSetting("") + { + Value = "", + }}, + AffinityInfo = new AffinityInfo(""), + Constraints = new BatchTaskConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + RequiredSlots = 1234, + UserIdentity = new UserIdentity + { + Username = "", + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, + MultiInstanceSettings = new MultiInstanceSettings("") + { + NumberOfInstances = 1234, + CommonResourceFiles = {default}, + }, + DependsOn = new BatchTaskDependencies + { + TaskIds = {""}, + TaskIdRanges = {new BatchTaskIdRange(1234, 1234)}, + }, + ApplicationPackageReferences = {new BatchApplicationPackageReference("") + { + Version = "", + }}, + AuthenticationTokenSettings = new AuthenticationTokenSettings + { + Access = {AccessScope.Job}, + }, + } +}); +Response response = client.CreateTaskCollection("", taskCollection, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); +]]> + + + +This sample shows how to call CreateTaskCollectionAsync and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + value = new object[] + { + new + { + id = "", + commandLine = "", + } + }, +}); +Response response = await client.CreateTaskCollectionAsync("", content); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.ToString()); +]]> +This sample shows how to call CreateTaskCollectionAsync with all parameters and request content and parse the result. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + value = new object[] + { + new + { + id = "", + displayName = "", + exitConditions = new Dictionary + { + ["exitCodes"] = new object[] + { + new + { + code = 1234, + exitOptions = new + { + jobAction = "none", + dependencyAction = "satisfy", + }, + } + }, + ["exitCodeRanges"] = new object[] + { + new + { + start = 1234, + end = 1234, + } + }, + ["preProcessingError"] = null, + ["fileUploadError"] = null, + ["default"] = null + }, + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + registry = new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + }, + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { + new + { + autoStorageContainerName = "", + storageContainerUrl = "", + httpUrl = "", + blobPrefix = "", + filePath = "", + fileMode = "", + } + }, + outputFiles = new object[] + { + new + { + filePattern = "", + destination = new + { + container = new + { + path = "", + containerUrl = "", + uploadHeaders = new object[] + { + new + { + name = "", + value = "", + } + }, + }, + }, + uploadOptions = new + { + uploadCondition = "tasksuccess", + }, + } + }, + environmentSettings = new object[] + { + new + { + name = "", + value = "", + } + }, + affinityInfo = new + { + affinityId = "", + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + requiredSlots = 1234, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + multiInstanceSettings = new + { + numberOfInstances = 1234, + coordinationCommandLine = "", + commonResourceFiles = new object[] + { + null + }, + }, + dependsOn = new + { + taskIds = new object[] + { + "" + }, + taskIdRanges = new object[] + { + new + { + start = 1234, + end = 1234, + } + }, + }, + applicationPackageReferences = new object[] + { + new + { + applicationId = "", + version = "", + } + }, + authenticationTokenSettings = new + { + access = new object[] + { + "job" + }, + }, + } + }, +}); +Response response = await client.CreateTaskCollectionAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("value")[0].GetProperty("status").ToString()); +Console.WriteLine(result.GetProperty("value")[0].GetProperty("taskId").ToString()); +Console.WriteLine(result.GetProperty("value")[0].GetProperty("eTag").ToString()); +Console.WriteLine(result.GetProperty("value")[0].GetProperty("lastModified").ToString()); +Console.WriteLine(result.GetProperty("value")[0].GetProperty("location").ToString()); +Console.WriteLine(result.GetProperty("value")[0].GetProperty("error").GetProperty("code").ToString()); +Console.WriteLine(result.GetProperty("value")[0].GetProperty("error").GetProperty("message").GetProperty("lang").ToString()); +Console.WriteLine(result.GetProperty("value")[0].GetProperty("error").GetProperty("message").GetProperty("value").ToString()); +Console.WriteLine(result.GetProperty("value")[0].GetProperty("error").GetProperty("values")[0].GetProperty("key").ToString()); 
+Console.WriteLine(result.GetProperty("value")[0].GetProperty("error").GetProperty("values")[0].GetProperty("value").ToString()); +]]> + + + +This sample shows how to call CreateTaskCollection and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + value = new object[] + { + new + { + id = "", + commandLine = "", + } + }, +}); +Response response = client.CreateTaskCollection("", content); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.ToString()); +]]> +This sample shows how to call CreateTaskCollection with all parameters and request content and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + value = new object[] + { + new + { + id = "", + displayName = "", + exitConditions = new Dictionary + { + ["exitCodes"] = new object[] + { + new + { + code = 1234, + exitOptions = new + { + jobAction = "none", + dependencyAction = "satisfy", + }, + } + }, + ["exitCodeRanges"] = new object[] + { + new + { + start = 1234, + end = 1234, + } + }, + ["preProcessingError"] = null, + ["fileUploadError"] = null, + ["default"] = null + }, + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + registry = new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + }, + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { + new + { + autoStorageContainerName = "", + storageContainerUrl = "", + httpUrl = "", + blobPrefix = "", + filePath = "", + fileMode = "", + } + }, + outputFiles = new object[] + { + new + { + filePattern = "", + destination = new + { + container = new + { + path = "", + containerUrl = "", + uploadHeaders = new object[] + { + new + { + name = "", + value = "", + } + }, + }, + }, + uploadOptions = new + { + uploadCondition = "tasksuccess", + }, + } + }, + environmentSettings = new object[] + { + new + { + name = "", + value = "", + } + }, + affinityInfo = new + { + affinityId = "", + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + requiredSlots = 1234, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + multiInstanceSettings = new + { + numberOfInstances = 1234, + coordinationCommandLine = "", + commonResourceFiles = new object[] + { + null + }, + }, + dependsOn = new + { + taskIds = new object[] + { + "" + }, + taskIdRanges = new object[] + { + new + { + start = 1234, + end = 1234, + } + }, + }, + applicationPackageReferences = new object[] + { + new + { + applicationId = "", + version = "", + } + }, + authenticationTokenSettings = new + { + access = new object[] + { + "job" + }, + }, + } + }, +}); +Response response = client.CreateTaskCollection("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("value")[0].GetProperty("status").ToString()); +Console.WriteLine(result.GetProperty("value")[0].GetProperty("taskId").ToString()); 
+Console.WriteLine(result.GetProperty("value")[0].GetProperty("eTag").ToString()); +Console.WriteLine(result.GetProperty("value")[0].GetProperty("lastModified").ToString()); +Console.WriteLine(result.GetProperty("value")[0].GetProperty("location").ToString()); +Console.WriteLine(result.GetProperty("value")[0].GetProperty("error").GetProperty("code").ToString()); +Console.WriteLine(result.GetProperty("value")[0].GetProperty("error").GetProperty("message").GetProperty("lang").ToString()); +Console.WriteLine(result.GetProperty("value")[0].GetProperty("error").GetProperty("message").GetProperty("value").ToString()); +Console.WriteLine(result.GetProperty("value")[0].GetProperty("error").GetProperty("values")[0].GetProperty("key").ToString()); +Console.WriteLine(result.GetProperty("value")[0].GetProperty("error").GetProperty("values")[0].GetProperty("value").ToString()); +]]> + + + +This sample shows how to call DeleteTaskAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.DeleteTaskAsync("", ""); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call DeleteTaskAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.DeleteTaskAsync("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call DeleteTask. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.DeleteTask("", ""); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call DeleteTask with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.DeleteTask("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call GetTaskAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetTaskAsync("", ""); +]]> +This sample shows how to call GetTaskAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetTaskAsync("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), select: new string[] { "" }, expand: new string[] { "" }, requestConditions: null); +]]> + + + +This sample shows how to call GetTaskAsync and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetTaskAsync("", "", null, null, null, null, null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.ToString()); +]]> +This sample shows how to call GetTaskAsync with all parameters and parse the result. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetTaskAsync("", "", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), new string[] { "" }, new string[] { "" }, null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("id").ToString()); +Console.WriteLine(result.GetProperty("displayName").ToString()); +Console.WriteLine(result.GetProperty("url").ToString()); +Console.WriteLine(result.GetProperty("eTag").ToString()); +Console.WriteLine(result.GetProperty("lastModified").ToString()); +Console.WriteLine(result.GetProperty("creationTime").ToString()); +Console.WriteLine(result.GetProperty("exitConditions").GetProperty("exitCodes")[0].GetProperty("code").ToString()); +Console.WriteLine(result.GetProperty("exitConditions").GetProperty("exitCodes")[0].GetProperty("exitOptions").GetProperty("jobAction").ToString()); +Console.WriteLine(result.GetProperty("exitConditions").GetProperty("exitCodes")[0].GetProperty("exitOptions").GetProperty("dependencyAction").ToString()); +Console.WriteLine(result.GetProperty("exitConditions").GetProperty("exitCodeRanges")[0].GetProperty("start").ToString()); +Console.WriteLine(result.GetProperty("exitConditions").GetProperty("exitCodeRanges")[0].GetProperty("end").ToString()); +Console.WriteLine(result.GetProperty("exitConditions").GetProperty("exitCodeRanges")[0].GetProperty("exitOptions").GetProperty("jobAction").ToString()); +Console.WriteLine(result.GetProperty("exitConditions").GetProperty("exitCodeRanges")[0].GetProperty("exitOptions").GetProperty("dependencyAction").ToString()); +Console.WriteLine(result.GetProperty("exitConditions").GetProperty("preProcessingError").GetProperty("jobAction").ToString()); +Console.WriteLine(result.GetProperty("exitConditions").GetProperty("preProcessingError").GetProperty("dependencyAction").ToString()); +Console.WriteLine(result.GetProperty("exitConditions").GetProperty("fileUploadError").GetProperty("jobAction").ToString()); +Console.WriteLine(result.GetProperty("exitConditions").GetProperty("fileUploadError").GetProperty("dependencyAction").ToString()); +Console.WriteLine(result.GetProperty("exitConditions").GetProperty("default").GetProperty("jobAction").ToString()); +Console.WriteLine(result.GetProperty("exitConditions").GetProperty("default").GetProperty("dependencyAction").ToString()); +Console.WriteLine(result.GetProperty("state").ToString()); +Console.WriteLine(result.GetProperty("stateTransitionTime").ToString()); +Console.WriteLine(result.GetProperty("previousState").ToString()); +Console.WriteLine(result.GetProperty("previousStateTransitionTime").ToString()); +Console.WriteLine(result.GetProperty("commandLine").ToString()); +Console.WriteLine(result.GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); +Console.WriteLine(result.GetProperty("containerSettings").GetProperty("imageName").ToString()); +Console.WriteLine(result.GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); +Console.WriteLine(result.GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); +Console.WriteLine(result.GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); 
+Console.WriteLine(result.GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); +Console.WriteLine(result.GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); +Console.WriteLine(result.GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); +Console.WriteLine(result.GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); +Console.WriteLine(result.GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); +Console.WriteLine(result.GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); +Console.WriteLine(result.GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); +Console.WriteLine(result.GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); +Console.WriteLine(result.GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); +Console.WriteLine(result.GetProperty("outputFiles")[0].GetProperty("filePattern").ToString()); +Console.WriteLine(result.GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("path").ToString()); +Console.WriteLine(result.GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("containerUrl").ToString()); +Console.WriteLine(result.GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("identityReference").GetProperty("resourceId").ToString()); +Console.WriteLine(result.GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("uploadHeaders")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("uploadHeaders")[0].GetProperty("value").ToString()); +Console.WriteLine(result.GetProperty("outputFiles")[0].GetProperty("uploadOptions").GetProperty("uploadCondition").ToString()); +Console.WriteLine(result.GetProperty("environmentSettings")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("environmentSettings")[0].GetProperty("value").ToString()); +Console.WriteLine(result.GetProperty("affinityInfo").GetProperty("affinityId").ToString()); +Console.WriteLine(result.GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); +Console.WriteLine(result.GetProperty("constraints").GetProperty("retentionTime").ToString()); +Console.WriteLine(result.GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); +Console.WriteLine(result.GetProperty("requiredSlots").ToString()); +Console.WriteLine(result.GetProperty("userIdentity").GetProperty("username").ToString()); +Console.WriteLine(result.GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); +Console.WriteLine(result.GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); +Console.WriteLine(result.GetProperty("executionInfo").GetProperty("startTime").ToString()); +Console.WriteLine(result.GetProperty("executionInfo").GetProperty("endTime").ToString()); +Console.WriteLine(result.GetProperty("executionInfo").GetProperty("exitCode").ToString()); +Console.WriteLine(result.GetProperty("executionInfo").GetProperty("containerInfo").GetProperty("containerId").ToString()); +Console.WriteLine(result.GetProperty("executionInfo").GetProperty("containerInfo").GetProperty("state").ToString()); 
+Console.WriteLine(result.GetProperty("executionInfo").GetProperty("containerInfo").GetProperty("error").ToString()); +Console.WriteLine(result.GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("category").ToString()); +Console.WriteLine(result.GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("code").ToString()); +Console.WriteLine(result.GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("message").ToString()); +Console.WriteLine(result.GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("details")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("details")[0].GetProperty("value").ToString()); +Console.WriteLine(result.GetProperty("executionInfo").GetProperty("retryCount").ToString()); +Console.WriteLine(result.GetProperty("executionInfo").GetProperty("lastRetryTime").ToString()); +Console.WriteLine(result.GetProperty("executionInfo").GetProperty("requeueCount").ToString()); +Console.WriteLine(result.GetProperty("executionInfo").GetProperty("lastRequeueTime").ToString()); +Console.WriteLine(result.GetProperty("executionInfo").GetProperty("result").ToString()); +Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("affinityId").ToString()); +Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("nodeUrl").ToString()); +Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("poolId").ToString()); +Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("nodeId").ToString()); +Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("taskRootDirectory").ToString()); +Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("taskRootDirectoryUrl").ToString()); +Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("numberOfInstances").ToString()); +Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("coordinationCommandLine").ToString()); +Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("commonResourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); +Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("commonResourceFiles")[0].GetProperty("storageContainerUrl").ToString()); +Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("commonResourceFiles")[0].GetProperty("httpUrl").ToString()); +Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("commonResourceFiles")[0].GetProperty("blobPrefix").ToString()); +Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("commonResourceFiles")[0].GetProperty("filePath").ToString()); +Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("commonResourceFiles")[0].GetProperty("fileMode").ToString()); +Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("commonResourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("url").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("startTime").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("lastUpdateTime").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("userCPUTime").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("kernelCPUTime").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("wallClockTime").ToString()); 
+Console.WriteLine(result.GetProperty("stats").GetProperty("readIOps").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("writeIOps").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("readIOGiB").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("writeIOGiB").ToString()); +Console.WriteLine(result.GetProperty("stats").GetProperty("waitTime").ToString()); +Console.WriteLine(result.GetProperty("dependsOn").GetProperty("taskIds")[0].ToString()); +Console.WriteLine(result.GetProperty("dependsOn").GetProperty("taskIdRanges")[0].GetProperty("start").ToString()); +Console.WriteLine(result.GetProperty("dependsOn").GetProperty("taskIdRanges")[0].GetProperty("end").ToString()); +Console.WriteLine(result.GetProperty("applicationPackageReferences")[0].GetProperty("applicationId").ToString()); +Console.WriteLine(result.GetProperty("applicationPackageReferences")[0].GetProperty("version").ToString()); +Console.WriteLine(result.GetProperty("authenticationTokenSettings").GetProperty("access")[0].ToString()); +]]> + + + +This sample shows how to call ReplaceTaskAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchTask task = new BatchTask(); +Response response = await client.ReplaceTaskAsync("", "", task); +]]> +This sample shows how to call ReplaceTaskAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchTask task = new BatchTask +{ + Constraints = new BatchTaskConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, +}; +Response response = await client.ReplaceTaskAsync("", "", task, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); +]]> + + + +This sample shows how to call ReplaceTask. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchTask task = new BatchTask(); +Response response = client.ReplaceTask("", "", task); +]]> +This sample shows how to call ReplaceTask with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchTask task = new BatchTask +{ + Constraints = new BatchTaskConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, +}; +Response response = client.ReplaceTask("", "", task, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); +]]> + + + +This sample shows how to call ReplaceTaskAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new object()); +Response response = await client.ReplaceTaskAsync("", "", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call ReplaceTaskAsync with all parameters and request content. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + constraints = new + { + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, +}); +Response response = await client.ReplaceTaskAsync("", "", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call ReplaceTask. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new object()); +Response response = client.ReplaceTask("", "", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call ReplaceTask with all parameters and request content. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + constraints = new + { + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, +}); +Response response = client.ReplaceTask("", "", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call TerminateTaskAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.TerminateTaskAsync("", ""); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call TerminateTaskAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.TerminateTaskAsync("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call TerminateTask. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.TerminateTask("", ""); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call TerminateTask with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.TerminateTask("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call ReactivateTaskAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.ReactivateTaskAsync("", ""); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call ReactivateTaskAsync with all parameters. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.ReactivateTaskAsync("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call ReactivateTask. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.ReactivateTask("", ""); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call ReactivateTask with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.ReactivateTask("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call DeleteTaskFileAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.DeleteTaskFileAsync("", "", ""); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call DeleteTaskFileAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.DeleteTaskFileAsync("", "", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), recursive: true); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call DeleteTaskFile. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.DeleteTaskFile("", "", ""); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call DeleteTaskFile with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.DeleteTaskFile("", "", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), recursive: true); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call GetTaskFileAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetTaskFileAsync("", "", ""); +]]> +This sample shows how to call GetTaskFileAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetTaskFileAsync("", "", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), ocpRange: "", requestConditions: null); +]]> + + + +This sample shows how to call GetTaskFile. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.GetTaskFile("", "", ""); +]]> +This sample shows how to call GetTaskFile with all parameters. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.GetTaskFile("", "", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), ocpRange: "", requestConditions: null); +]]> + + + +This sample shows how to call GetTaskFileAsync and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetTaskFileAsync("", "", "", null, null, null, null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.ToString()); +]]> +This sample shows how to call GetTaskFileAsync with all parameters and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetTaskFileAsync("", "", "", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), "", null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.ToString()); +]]> + + + +This sample shows how to call GetTaskFile and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.GetTaskFile("", "", "", null, null, null, null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.ToString()); +]]> +This sample shows how to call GetTaskFile with all parameters and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.GetTaskFile("", "", "", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), "", null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.ToString()); +]]> + + + +This sample shows how to call CreateNodeUserAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchNodeUserCreateContent user = new BatchNodeUserCreateContent(""); +Response response = await client.CreateNodeUserAsync("", "", user); +]]> +This sample shows how to call CreateNodeUserAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchNodeUserCreateContent user = new BatchNodeUserCreateContent("") +{ + IsAdmin = true, + ExpiryTime = DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), + Password = "", + SshPublicKey = "", +}; +Response response = await client.CreateNodeUserAsync("", "", user, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); +]]> + + + +This sample shows how to call CreateNodeUser. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchNodeUserCreateContent user = new BatchNodeUserCreateContent(""); +Response response = client.CreateNodeUser("", "", user); +]]> +This sample shows how to call CreateNodeUser with all parameters. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchNodeUserCreateContent user = new BatchNodeUserCreateContent("") +{ + IsAdmin = true, + ExpiryTime = DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), + Password = "", + SshPublicKey = "", +}; +Response response = client.CreateNodeUser("", "", user, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); +]]> + + + +This sample shows how to call CreateNodeUserAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + name = "", +}); +Response response = await client.CreateNodeUserAsync("", "", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call CreateNodeUserAsync with all parameters and request content. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + name = "", + isAdmin = true, + expiryTime = "2022-05-10T18:57:31.2311892Z", + password = "", + sshPublicKey = "", +}); +Response response = await client.CreateNodeUserAsync("", "", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call CreateNodeUser. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + name = "", +}); +Response response = client.CreateNodeUser("", "", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call CreateNodeUser with all parameters and request content. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + name = "", + isAdmin = true, + expiryTime = "2022-05-10T18:57:31.2311892Z", + password = "", + sshPublicKey = "", +}); +Response response = client.CreateNodeUser("", "", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call DeleteNodeUserAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.DeleteNodeUserAsync("", "", ""); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call DeleteNodeUserAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.DeleteNodeUserAsync("", "", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call DeleteNodeUser. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.DeleteNodeUser("", "", ""); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call DeleteNodeUser with all parameters. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.DeleteNodeUser("", "", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call ReplaceNodeUserAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchNodeUserUpdateContent content = new BatchNodeUserUpdateContent(); +Response response = await client.ReplaceNodeUserAsync("", "", "", content); +]]> +This sample shows how to call ReplaceNodeUserAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchNodeUserUpdateContent content = new BatchNodeUserUpdateContent +{ + Password = "", + ExpiryTime = DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), + SshPublicKey = "", +}; +Response response = await client.ReplaceNodeUserAsync("", "", "", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); +]]> + + + +This sample shows how to call ReplaceNodeUser. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchNodeUserUpdateContent content = new BatchNodeUserUpdateContent(); +Response response = client.ReplaceNodeUser("", "", "", content); +]]> +This sample shows how to call ReplaceNodeUser with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchNodeUserUpdateContent content = new BatchNodeUserUpdateContent +{ + Password = "", + ExpiryTime = DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), + SshPublicKey = "", +}; +Response response = client.ReplaceNodeUser("", "", "", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); +]]> + + + +This sample shows how to call ReplaceNodeUserAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new object()); +Response response = await client.ReplaceNodeUserAsync("", "", "", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call ReplaceNodeUserAsync with all parameters and request content. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + password = "", + expiryTime = "2022-05-10T18:57:31.2311892Z", + sshPublicKey = "", +}); +Response response = await client.ReplaceNodeUserAsync("", "", "", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call ReplaceNodeUser. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new object()); +Response response = client.ReplaceNodeUser("", "", "", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call ReplaceNodeUser with all parameters and request content. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + password = "", + expiryTime = "2022-05-10T18:57:31.2311892Z", + sshPublicKey = "", +}); +Response response = client.ReplaceNodeUser("", "", "", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call GetNodeAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetNodeAsync("", ""); +]]> +This sample shows how to call GetNodeAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetNodeAsync("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), select: new string[] { "" }); +]]> + + + +This sample shows how to call GetNodeAsync and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetNodeAsync("", "", null, null, null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.ToString()); +]]> +This sample shows how to call GetNodeAsync with all parameters and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetNodeAsync("", "", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), new string[] { "" }, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("id").ToString()); +Console.WriteLine(result.GetProperty("url").ToString()); +Console.WriteLine(result.GetProperty("state").ToString()); +Console.WriteLine(result.GetProperty("schedulingState").ToString()); +Console.WriteLine(result.GetProperty("stateTransitionTime").ToString()); +Console.WriteLine(result.GetProperty("lastBootTime").ToString()); +Console.WriteLine(result.GetProperty("allocationTime").ToString()); +Console.WriteLine(result.GetProperty("ipAddress").ToString()); +Console.WriteLine(result.GetProperty("affinityId").ToString()); +Console.WriteLine(result.GetProperty("vmSize").ToString()); +Console.WriteLine(result.GetProperty("totalTasksRun").ToString()); +Console.WriteLine(result.GetProperty("runningTasksCount").ToString()); +Console.WriteLine(result.GetProperty("runningTaskSlotsCount").ToString()); +Console.WriteLine(result.GetProperty("totalTasksSucceeded").ToString()); +Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("taskUrl").ToString()); +Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("jobId").ToString()); +Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("taskId").ToString()); +Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("subtaskId").ToString()); +Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("taskState").ToString()); +Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("startTime").ToString()); 
+Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("endTime").ToString()); +Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("exitCode").ToString()); +Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("containerInfo").GetProperty("containerId").ToString()); +Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("containerInfo").GetProperty("state").ToString()); +Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("containerInfo").GetProperty("error").ToString()); +Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("category").ToString()); +Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("code").ToString()); +Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("message").ToString()); +Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("details")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("details")[0].GetProperty("value").ToString()); +Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("retryCount").ToString()); +Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("lastRetryTime").ToString()); +Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("requeueCount").ToString()); +Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("lastRequeueTime").ToString()); +Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("result").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("commandLine").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); 
+Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("userIdentity").GetProperty("username").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("maxTaskRetryCount").ToString()); +Console.WriteLine(result.GetProperty("startTask").GetProperty("waitForSuccess").ToString()); +Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("state").ToString()); +Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("startTime").ToString()); +Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("endTime").ToString()); +Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("exitCode").ToString()); +Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("containerInfo").GetProperty("containerId").ToString()); +Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("containerInfo").GetProperty("state").ToString()); +Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("containerInfo").GetProperty("error").ToString()); +Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("failureInfo").GetProperty("category").ToString()); +Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("failureInfo").GetProperty("code").ToString()); +Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("failureInfo").GetProperty("message").ToString()); +Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("failureInfo").GetProperty("details")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("failureInfo").GetProperty("details")[0].GetProperty("value").ToString()); +Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("retryCount").ToString()); +Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("lastRetryTime").ToString()); +Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("result").ToString()); +Console.WriteLine(result.GetProperty("errors")[0].GetProperty("code").ToString()); +Console.WriteLine(result.GetProperty("errors")[0].GetProperty("message").ToString()); +Console.WriteLine(result.GetProperty("errors")[0].GetProperty("errorDetails")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("errors")[0].GetProperty("errorDetails")[0].GetProperty("value").ToString()); 
+Console.WriteLine(result.GetProperty("isDedicated").ToString()); +Console.WriteLine(result.GetProperty("endpointConfiguration").GetProperty("inboundEndpoints")[0].GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("endpointConfiguration").GetProperty("inboundEndpoints")[0].GetProperty("protocol").ToString()); +Console.WriteLine(result.GetProperty("endpointConfiguration").GetProperty("inboundEndpoints")[0].GetProperty("publicIPAddress").ToString()); +Console.WriteLine(result.GetProperty("endpointConfiguration").GetProperty("inboundEndpoints")[0].GetProperty("publicFQDN").ToString()); +Console.WriteLine(result.GetProperty("endpointConfiguration").GetProperty("inboundEndpoints")[0].GetProperty("frontendPort").ToString()); +Console.WriteLine(result.GetProperty("endpointConfiguration").GetProperty("inboundEndpoints")[0].GetProperty("backendPort").ToString()); +Console.WriteLine(result.GetProperty("nodeAgentInfo").GetProperty("version").ToString()); +Console.WriteLine(result.GetProperty("nodeAgentInfo").GetProperty("lastUpdateTime").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineInfo").GetProperty("imageReference").GetProperty("publisher").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineInfo").GetProperty("imageReference").GetProperty("offer").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineInfo").GetProperty("imageReference").GetProperty("sku").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineInfo").GetProperty("imageReference").GetProperty("version").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineInfo").GetProperty("imageReference").GetProperty("virtualMachineImageId").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineInfo").GetProperty("imageReference").GetProperty("exactVersion").ToString()); +Console.WriteLine(result.GetProperty("virtualMachineInfo").GetProperty("scaleSetVmResourceId").ToString()); +]]> + + + +This sample shows how to call RebootNodeAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.RebootNodeAsync("", ""); +]]> +This sample shows how to call RebootNodeAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchNodeRebootContent parameters = new BatchNodeRebootContent +{ + NodeRebootOption = BatchNodeRebootOption.Requeue, +}; +Response response = await client.RebootNodeAsync("", "", parameters: parameters, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); +]]> + + + +This sample shows how to call RebootNode. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.RebootNode("", ""); +]]> +This sample shows how to call RebootNode with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchNodeRebootContent parameters = new BatchNodeRebootContent +{ + NodeRebootOption = BatchNodeRebootOption.Requeue, +}; +Response response = client.RebootNode("", "", parameters: parameters, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); +]]> + + + +This sample shows how to call RebootNodeAsync. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = null; +Response response = await client.RebootNodeAsync("", "", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call RebootNodeAsync with all parameters and request content. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + nodeRebootOption = "requeue", +}); +Response response = await client.RebootNodeAsync("", "", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call RebootNode. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = null; +Response response = client.RebootNode("", "", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call RebootNode with all parameters and request content. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + nodeRebootOption = "requeue", +}); +Response response = client.RebootNode("", "", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call DisableNodeSchedulingAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.DisableNodeSchedulingAsync("", ""); +]]> +This sample shows how to call DisableNodeSchedulingAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchNodeDisableSchedulingContent parameters = new BatchNodeDisableSchedulingContent +{ + NodeDisableSchedulingOption = BatchNodeDisableSchedulingOption.Requeue, +}; +Response response = await client.DisableNodeSchedulingAsync("", "", parameters: parameters, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); +]]> + + + +This sample shows how to call DisableNodeScheduling. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.DisableNodeScheduling("", ""); +]]> +This sample shows how to call DisableNodeScheduling with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +BatchNodeDisableSchedulingContent parameters = new BatchNodeDisableSchedulingContent +{ + NodeDisableSchedulingOption = BatchNodeDisableSchedulingOption.Requeue, +}; +Response response = client.DisableNodeScheduling("", "", parameters: parameters, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); +]]> + + + +This sample shows how to call DisableNodeSchedulingAsync. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = null; +Response response = await client.DisableNodeSchedulingAsync("", "", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call DisableNodeSchedulingAsync with all parameters and request content. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + nodeDisableSchedulingOption = "requeue", +}); +Response response = await client.DisableNodeSchedulingAsync("", "", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call DisableNodeScheduling. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = null; +Response response = client.DisableNodeScheduling("", "", content); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call DisableNodeScheduling with all parameters and request content. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + nodeDisableSchedulingOption = "requeue", +}); +Response response = client.DisableNodeScheduling("", "", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call EnableNodeSchedulingAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.EnableNodeSchedulingAsync("", ""); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call EnableNodeSchedulingAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.EnableNodeSchedulingAsync("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call EnableNodeScheduling. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.EnableNodeScheduling("", ""); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call EnableNodeScheduling with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.EnableNodeScheduling("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call GetNodeRemoteLoginSettingsAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetNodeRemoteLoginSettingsAsync("", ""); +]]> +This sample shows how to call GetNodeRemoteLoginSettingsAsync with all parameters. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetNodeRemoteLoginSettingsAsync("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); +]]> + + + +This sample shows how to call GetNodeRemoteLoginSettings. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.GetNodeRemoteLoginSettings("", ""); +]]> +This sample shows how to call GetNodeRemoteLoginSettings with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.GetNodeRemoteLoginSettings("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); +]]> + + + +This sample shows how to call GetNodeRemoteLoginSettingsAsync and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetNodeRemoteLoginSettingsAsync("", "", null, null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("remoteLoginIPAddress").ToString()); +Console.WriteLine(result.GetProperty("remoteLoginPort").ToString()); +]]> +This sample shows how to call GetNodeRemoteLoginSettingsAsync with all parameters and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetNodeRemoteLoginSettingsAsync("", "", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("remoteLoginIPAddress").ToString()); +Console.WriteLine(result.GetProperty("remoteLoginPort").ToString()); +]]> + + + +This sample shows how to call GetNodeRemoteLoginSettings and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.GetNodeRemoteLoginSettings("", "", null, null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("remoteLoginIPAddress").ToString()); +Console.WriteLine(result.GetProperty("remoteLoginPort").ToString()); +]]> +This sample shows how to call GetNodeRemoteLoginSettings with all parameters and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.GetNodeRemoteLoginSettings("", "", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("remoteLoginIPAddress").ToString()); +Console.WriteLine(result.GetProperty("remoteLoginPort").ToString()); +]]> + + + +This sample shows how to call UploadNodeLogsAsync. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +UploadBatchServiceLogsContent content = new UploadBatchServiceLogsContent("", DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z")); +Response response = await client.UploadNodeLogsAsync("", "", content); +]]> +This sample shows how to call UploadNodeLogsAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +UploadBatchServiceLogsContent content = new UploadBatchServiceLogsContent("", DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z")) +{ + EndTime = DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), + IdentityReference = new BatchNodeIdentityReference + { + ResourceId = "", + }, +}; +Response response = await client.UploadNodeLogsAsync("", "", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); +]]> + + + +This sample shows how to call UploadNodeLogs. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +UploadBatchServiceLogsContent content = new UploadBatchServiceLogsContent("", DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z")); +Response response = client.UploadNodeLogs("", "", content); +]]> +This sample shows how to call UploadNodeLogs with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +UploadBatchServiceLogsContent content = new UploadBatchServiceLogsContent("", DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z")) +{ + EndTime = DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), + IdentityReference = new BatchNodeIdentityReference + { + ResourceId = "", + }, +}; +Response response = client.UploadNodeLogs("", "", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); +]]> + + + +This sample shows how to call UploadNodeLogsAsync and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + containerUrl = "", + startTime = "2022-05-10T18:57:31.2311892Z", +}); +Response response = await client.UploadNodeLogsAsync("", "", content); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("virtualDirectoryName").ToString()); +Console.WriteLine(result.GetProperty("numberOfFilesUploaded").ToString()); +]]> +This sample shows how to call UploadNodeLogsAsync with all parameters and request content and parse the result. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + containerUrl = "", + startTime = "2022-05-10T18:57:31.2311892Z", + endTime = "2022-05-10T18:57:31.2311892Z", + identityReference = new + { + resourceId = "", + }, +}); +Response response = await client.UploadNodeLogsAsync("", "", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("virtualDirectoryName").ToString()); +Console.WriteLine(result.GetProperty("numberOfFilesUploaded").ToString()); +]]> + + + +This sample shows how to call UploadNodeLogs and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + containerUrl = "", + startTime = "2022-05-10T18:57:31.2311892Z", +}); +Response response = client.UploadNodeLogs("", "", content); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("virtualDirectoryName").ToString()); +Console.WriteLine(result.GetProperty("numberOfFilesUploaded").ToString()); +]]> +This sample shows how to call UploadNodeLogs with all parameters and request content and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +using RequestContent content = RequestContent.Create(new +{ + containerUrl = "", + startTime = "2022-05-10T18:57:31.2311892Z", + endTime = "2022-05-10T18:57:31.2311892Z", + identityReference = new + { + resourceId = "", + }, +}); +Response response = client.UploadNodeLogs("", "", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("virtualDirectoryName").ToString()); +Console.WriteLine(result.GetProperty("numberOfFilesUploaded").ToString()); +]]> + + + +This sample shows how to call GetNodeExtensionAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetNodeExtensionAsync("", "", ""); +]]> +This sample shows how to call GetNodeExtensionAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetNodeExtensionAsync("", "", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), select: new string[] { "" }); +]]> + + + +This sample shows how to call GetNodeExtensionAsync and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetNodeExtensionAsync("", "", "", null, null, null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.ToString()); +]]> +This sample shows how to call GetNodeExtensionAsync with all parameters and parse the result. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetNodeExtensionAsync("", "", "", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), new string[] { "" }, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.GetProperty("provisioningState").ToString()); +Console.WriteLine(result.GetProperty("vmExtension").GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("vmExtension").GetProperty("publisher").ToString()); +Console.WriteLine(result.GetProperty("vmExtension").GetProperty("type").ToString()); +Console.WriteLine(result.GetProperty("vmExtension").GetProperty("typeHandlerVersion").ToString()); +Console.WriteLine(result.GetProperty("vmExtension").GetProperty("autoUpgradeMinorVersion").ToString()); +Console.WriteLine(result.GetProperty("vmExtension").GetProperty("enableAutomaticUpgrade").ToString()); +Console.WriteLine(result.GetProperty("vmExtension").GetProperty("settings").GetProperty("").ToString()); +Console.WriteLine(result.GetProperty("vmExtension").GetProperty("protectedSettings").GetProperty("").ToString()); +Console.WriteLine(result.GetProperty("vmExtension").GetProperty("provisionAfterExtensions")[0].ToString()); +Console.WriteLine(result.GetProperty("instanceView").GetProperty("name").ToString()); +Console.WriteLine(result.GetProperty("instanceView").GetProperty("statuses")[0].GetProperty("code").ToString()); +Console.WriteLine(result.GetProperty("instanceView").GetProperty("statuses")[0].GetProperty("displayStatus").ToString()); +Console.WriteLine(result.GetProperty("instanceView").GetProperty("statuses")[0].GetProperty("level").ToString()); +Console.WriteLine(result.GetProperty("instanceView").GetProperty("statuses")[0].GetProperty("message").ToString()); +Console.WriteLine(result.GetProperty("instanceView").GetProperty("statuses")[0].GetProperty("time").ToString()); +Console.WriteLine(result.GetProperty("instanceView").GetProperty("subStatuses")[0].GetProperty("code").ToString()); +Console.WriteLine(result.GetProperty("instanceView").GetProperty("subStatuses")[0].GetProperty("displayStatus").ToString()); +Console.WriteLine(result.GetProperty("instanceView").GetProperty("subStatuses")[0].GetProperty("level").ToString()); +Console.WriteLine(result.GetProperty("instanceView").GetProperty("subStatuses")[0].GetProperty("message").ToString()); +Console.WriteLine(result.GetProperty("instanceView").GetProperty("subStatuses")[0].GetProperty("time").ToString()); +]]> + + + +This sample shows how to call DeleteNodeFileAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.DeleteNodeFileAsync("", "", ""); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call DeleteNodeFileAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.DeleteNodeFileAsync("", "", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), recursive: true); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call DeleteNodeFile. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.DeleteNodeFile("", "", ""); + +Console.WriteLine(response.Status); +]]> +This sample shows how to call DeleteNodeFile with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.DeleteNodeFile("", "", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), recursive: true); + +Console.WriteLine(response.Status); +]]> + + + +This sample shows how to call GetNodeFileAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetNodeFileAsync("", "", ""); +]]> +This sample shows how to call GetNodeFileAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetNodeFileAsync("", "", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), ocpRange: "", requestConditions: null); +]]> + + + +This sample shows how to call GetNodeFile. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.GetNodeFile("", "", ""); +]]> +This sample shows how to call GetNodeFile with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.GetNodeFile("", "", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), ocpRange: "", requestConditions: null); +]]> + + + +This sample shows how to call GetNodeFileAsync and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetNodeFileAsync("", "", "", null, null, null, null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.ToString()); +]]> +This sample shows how to call GetNodeFileAsync with all parameters and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = await client.GetNodeFileAsync("", "", "", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), "", null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.ToString()); +]]> + + + +This sample shows how to call GetNodeFile and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.GetNodeFile("", "", "", null, null, null, null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.ToString()); +]]> +This sample shows how to call GetNodeFile with all parameters and parse the result. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +Response response = client.GetNodeFile("", "", "", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), "", null, null); + +JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; +Console.WriteLine(result.ToString()); +]]> + + + +This sample shows how to call GetApplicationsAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BatchApplication item in client.GetApplicationsAsync()) +{ +} +]]> +This sample shows how to call GetApplicationsAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BatchApplication item in client.GetApplicationsAsync(timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234)) +{ +} +]]> + + + +This sample shows how to call GetApplications. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +foreach (BatchApplication item in client.GetApplications()) +{ +} +]]> +This sample shows how to call GetApplications with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +foreach (BatchApplication item in client.GetApplications(timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234)) +{ +} +]]> + + + +This sample shows how to call GetApplicationsAsync and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BinaryData item in client.GetApplicationsAsync(null, null, null, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("versions")[0].ToString()); +} +]]> +This sample shows how to call GetApplicationsAsync with all parameters and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BinaryData item in client.GetApplicationsAsync(1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("versions")[0].ToString()); +} +]]> + + + +This sample shows how to call GetApplications and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +foreach (BinaryData item in client.GetApplications(null, null, null, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("versions")[0].ToString()); +} +]]> +This sample shows how to call GetApplications with all parameters and parse the result. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +foreach (BinaryData item in client.GetApplications(1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("versions")[0].ToString()); +} +]]> + + + +This sample shows how to call GetPoolUsageMetricsAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BatchPoolUsageMetrics item in client.GetPoolUsageMetricsAsync()) +{ +} +]]> +This sample shows how to call GetPoolUsageMetricsAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BatchPoolUsageMetrics item in client.GetPoolUsageMetricsAsync(timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, starttime: DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), endtime: DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), filter: "")) +{ +} +]]> + + + +This sample shows how to call GetPoolUsageMetrics. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +foreach (BatchPoolUsageMetrics item in client.GetPoolUsageMetrics()) +{ +} +]]> +This sample shows how to call GetPoolUsageMetrics with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +foreach (BatchPoolUsageMetrics item in client.GetPoolUsageMetrics(timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, starttime: DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), endtime: DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), filter: "")) +{ +} +]]> + + + +This sample shows how to call GetPoolUsageMetricsAsync and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BinaryData item in client.GetPoolUsageMetricsAsync(null, null, null, null, null, null, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("poolId").ToString()); + Console.WriteLine(result.GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("endTime").ToString()); + Console.WriteLine(result.GetProperty("vmSize").ToString()); + Console.WriteLine(result.GetProperty("totalCoreHours").ToString()); +} +]]> +This sample shows how to call GetPoolUsageMetricsAsync with all parameters and parse the result. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BinaryData item in client.GetPoolUsageMetricsAsync(1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), "", null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("poolId").ToString()); + Console.WriteLine(result.GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("endTime").ToString()); + Console.WriteLine(result.GetProperty("vmSize").ToString()); + Console.WriteLine(result.GetProperty("totalCoreHours").ToString()); +} +]]> + + + +This sample shows how to call GetPoolUsageMetrics and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +foreach (BinaryData item in client.GetPoolUsageMetrics(null, null, null, null, null, null, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("poolId").ToString()); + Console.WriteLine(result.GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("endTime").ToString()); + Console.WriteLine(result.GetProperty("vmSize").ToString()); + Console.WriteLine(result.GetProperty("totalCoreHours").ToString()); +} +]]> +This sample shows how to call GetPoolUsageMetrics with all parameters and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +foreach (BinaryData item in client.GetPoolUsageMetrics(1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), "", null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("poolId").ToString()); + Console.WriteLine(result.GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("endTime").ToString()); + Console.WriteLine(result.GetProperty("vmSize").ToString()); + Console.WriteLine(result.GetProperty("totalCoreHours").ToString()); +} +]]> + + + +This sample shows how to call GetPoolsAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BatchPool item in client.GetPoolsAsync()) +{ +} +]]> +This sample shows how to call GetPoolsAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BatchPool item in client.GetPoolsAsync(timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, filter: "", select: new string[] { "" }, expand: new string[] { "" })) +{ +} +]]> + + + +This sample shows how to call GetPoolsAsync and parse the result. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BinaryData item in client.GetPoolsAsync(null, null, null, null, null, null, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.ToString()); +} +]]> +This sample shows how to call GetPoolsAsync with all parameters and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BinaryData item in client.GetPoolsAsync(1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, "", new string[] { "" }, new string[] { "" }, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("eTag").ToString()); + Console.WriteLine(result.GetProperty("lastModified").ToString()); + Console.WriteLine(result.GetProperty("creationTime").ToString()); + Console.WriteLine(result.GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("stateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("allocationState").ToString()); + Console.WriteLine(result.GetProperty("allocationStateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("vmSize").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("publisher").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("offer").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("sku").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("virtualMachineImageId").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("exactVersion").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("nodeAgentSKUId").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("windowsConfiguration").GetProperty("enableAutomaticUpdates").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("lun").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("caching").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("diskSizeGB").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("storageAccountType").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("licenseType").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("type").ToString()); + 
Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerImageNames")[0].ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("diskEncryptionConfiguration").GetProperty("targets")[0].ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("nodePlacementConfiguration").GetProperty("policy").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("publisher").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("type").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("typeHandlerVersion").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("autoUpgradeMinorVersion").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("enableAutomaticUpgrade").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("settings").GetProperty("").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("protectedSettings").GetProperty("").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("provisionAfterExtensions")[0].ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("ephemeralOSDiskSettings").GetProperty("placement").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("caching").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("diskSizeGB").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("managedDisk").GetProperty("storageAccountType").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("writeAcceleratorEnabled").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("encryptionAtHost").ToString()); + 
Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("securityType").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("uefiSettings").GetProperty("secureBootEnabled").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("uefiSettings").GetProperty("vTpmEnabled").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("serviceArtifactReference").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("resizeTimeout").ToString()); + Console.WriteLine(result.GetProperty("resizeErrors")[0].GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("resizeErrors")[0].GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("resizeErrors")[0].GetProperty("values")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("resizeErrors")[0].GetProperty("values")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("resourceTags").GetProperty("").ToString()); + Console.WriteLine(result.GetProperty("currentDedicatedNodes").ToString()); + Console.WriteLine(result.GetProperty("currentLowPriorityNodes").ToString()); + Console.WriteLine(result.GetProperty("targetDedicatedNodes").ToString()); + Console.WriteLine(result.GetProperty("targetLowPriorityNodes").ToString()); + Console.WriteLine(result.GetProperty("enableAutoScale").ToString()); + Console.WriteLine(result.GetProperty("autoScaleFormula").ToString()); + Console.WriteLine(result.GetProperty("autoScaleEvaluationInterval").ToString()); + Console.WriteLine(result.GetProperty("autoScaleRun").GetProperty("timestamp").ToString()); + Console.WriteLine(result.GetProperty("autoScaleRun").GetProperty("results").ToString()); + Console.WriteLine(result.GetProperty("autoScaleRun").GetProperty("error").GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("autoScaleRun").GetProperty("error").GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("autoScaleRun").GetProperty("error").GetProperty("values")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("autoScaleRun").GetProperty("error").GetProperty("values")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("enableInterNodeCommunication").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("subnetId").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("dynamicVNetAssignmentScope").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("protocol").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("backendPort").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("frontendPortRangeStart").ToString()); + 
Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("frontendPortRangeEnd").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("priority").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("access").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("sourceAddressPrefix").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("sourcePortRanges")[0].ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("publicIPAddressConfiguration").GetProperty("provision").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("publicIPAddressConfiguration").GetProperty("ipAddressIds")[0].ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("enableAcceleratedNetworking").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + 
Console.WriteLine(result.GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("waitForSuccess").ToString()); + Console.WriteLine(result.GetProperty("applicationPackageReferences")[0].GetProperty("applicationId").ToString()); + Console.WriteLine(result.GetProperty("applicationPackageReferences")[0].GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("taskSlotsPerNode").ToString()); + Console.WriteLine(result.GetProperty("taskSchedulingPolicy").GetProperty("nodeFillType").ToString()); + Console.WriteLine(result.GetProperty("userAccounts")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("userAccounts")[0].GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("userAccounts")[0].GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("uid").ToString()); + Console.WriteLine(result.GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("gid").ToString()); + Console.WriteLine(result.GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("sshPrivateKey").ToString()); + Console.WriteLine(result.GetProperty("userAccounts")[0].GetProperty("windowsUserConfiguration").GetProperty("loginMode").ToString()); + Console.WriteLine(result.GetProperty("metadata")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("metadata")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("lastUpdateTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("usageStats").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("usageStats").GetProperty("lastUpdateTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("usageStats").GetProperty("dedicatedCoreTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("lastUpdateTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("avgCPUPercentage").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("avgMemoryGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("peakMemoryGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("avgDiskGiB").ToString()); + 
Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("peakDiskGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("diskReadIOps").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("diskWriteIOps").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("diskReadGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("diskWriteGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("networkReadGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("networkWriteGiB").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("accountName").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("containerName").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("accountKey").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("sasKey").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("blobfuseOptions").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("source").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("mountOptions").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("source").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("mountOptions").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("accountName").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("azureFileUrl").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("accountKey").ToString()); + 
Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("mountOptions").ToString()); + Console.WriteLine(result.GetProperty("identity").GetProperty("type").ToString()); + Console.WriteLine(result.GetProperty("identity").GetProperty("userAssignedIdentities")[0].GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("identity").GetProperty("userAssignedIdentities")[0].GetProperty("clientId").ToString()); + Console.WriteLine(result.GetProperty("identity").GetProperty("userAssignedIdentities")[0].GetProperty("principalId").ToString()); + Console.WriteLine(result.GetProperty("targetNodeCommunicationMode").ToString()); + Console.WriteLine(result.GetProperty("currentNodeCommunicationMode").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("mode").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("disableAutomaticRollback").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("enableAutomaticOSUpgrade").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("useRollingUpgradePolicy").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("osRollingUpgradeDeferral").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("enableCrossZoneUpgrade").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxBatchInstancePercent").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxUnhealthyInstancePercent").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxUnhealthyUpgradedInstancePercent").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("pauseTimeBetweenBatches").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("prioritizeUnhealthyInstances").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("rollbackFailedInstancesOnPolicyBreach").ToString()); +} +]]> + + + +This sample shows how to call GetSupportedImagesAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BatchSupportedImage item in client.GetSupportedImagesAsync()) +{ +} +]]> +This sample shows how to call GetSupportedImagesAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BatchSupportedImage item in client.GetSupportedImagesAsync(timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, filter: "")) +{ +} +]]> + + + +This sample shows how to call GetSupportedImages. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +foreach (BatchSupportedImage item in client.GetSupportedImages()) +{ +} +]]> +This sample shows how to call GetSupportedImages with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +foreach (BatchSupportedImage item in client.GetSupportedImages(timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, filter: "")) +{ +} +]]> + + + +This sample shows how to call GetSupportedImagesAsync and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BinaryData item in client.GetSupportedImagesAsync(null, null, null, null, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("nodeAgentSKUId").ToString()); + Console.WriteLine(result.GetProperty("imageReference").ToString()); + Console.WriteLine(result.GetProperty("osType").ToString()); + Console.WriteLine(result.GetProperty("verificationType").ToString()); +} +]]> +This sample shows how to call GetSupportedImagesAsync with all parameters and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BinaryData item in client.GetSupportedImagesAsync(1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, "", null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("nodeAgentSKUId").ToString()); + Console.WriteLine(result.GetProperty("imageReference").GetProperty("publisher").ToString()); + Console.WriteLine(result.GetProperty("imageReference").GetProperty("offer").ToString()); + Console.WriteLine(result.GetProperty("imageReference").GetProperty("sku").ToString()); + Console.WriteLine(result.GetProperty("imageReference").GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("imageReference").GetProperty("virtualMachineImageId").ToString()); + Console.WriteLine(result.GetProperty("imageReference").GetProperty("exactVersion").ToString()); + Console.WriteLine(result.GetProperty("osType").ToString()); + Console.WriteLine(result.GetProperty("capabilities")[0].ToString()); + Console.WriteLine(result.GetProperty("batchSupportEndOfLife").ToString()); + Console.WriteLine(result.GetProperty("verificationType").ToString()); +} +]]> + + + +This sample shows how to call GetSupportedImages and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +foreach (BinaryData item in client.GetSupportedImages(null, null, null, null, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("nodeAgentSKUId").ToString()); + Console.WriteLine(result.GetProperty("imageReference").ToString()); + Console.WriteLine(result.GetProperty("osType").ToString()); + Console.WriteLine(result.GetProperty("verificationType").ToString()); +} +]]> +This sample shows how to call GetSupportedImages with all parameters and parse the result. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +foreach (BinaryData item in client.GetSupportedImages(1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, "", null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("nodeAgentSKUId").ToString()); + Console.WriteLine(result.GetProperty("imageReference").GetProperty("publisher").ToString()); + Console.WriteLine(result.GetProperty("imageReference").GetProperty("offer").ToString()); + Console.WriteLine(result.GetProperty("imageReference").GetProperty("sku").ToString()); + Console.WriteLine(result.GetProperty("imageReference").GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("imageReference").GetProperty("virtualMachineImageId").ToString()); + Console.WriteLine(result.GetProperty("imageReference").GetProperty("exactVersion").ToString()); + Console.WriteLine(result.GetProperty("osType").ToString()); + Console.WriteLine(result.GetProperty("capabilities")[0].ToString()); + Console.WriteLine(result.GetProperty("batchSupportEndOfLife").ToString()); + Console.WriteLine(result.GetProperty("verificationType").ToString()); +} +]]> + + + +This sample shows how to call GetPoolNodeCountsAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BatchPoolNodeCounts item in client.GetPoolNodeCountsAsync()) +{ +} +]]> +This sample shows how to call GetPoolNodeCountsAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BatchPoolNodeCounts item in client.GetPoolNodeCountsAsync(timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, filter: "")) +{ +} +]]> + + + +This sample shows how to call GetPoolNodeCounts. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +foreach (BatchPoolNodeCounts item in client.GetPoolNodeCounts()) +{ +} +]]> +This sample shows how to call GetPoolNodeCounts with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +foreach (BatchPoolNodeCounts item in client.GetPoolNodeCounts(timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, filter: "")) +{ +} +]]> + + + +This sample shows how to call GetPoolNodeCountsAsync and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BinaryData item in client.GetPoolNodeCountsAsync(null, null, null, null, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("poolId").ToString()); +} +]]> +This sample shows how to call GetPoolNodeCountsAsync with all parameters and parse the result. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BinaryData item in client.GetPoolNodeCountsAsync(1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, "", null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("poolId").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("creating").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("idle").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("offline").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("preempted").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("rebooting").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("reimaging").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("running").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("starting").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("startTaskFailed").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("leavingPool").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("unknown").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("unusable").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("waitingForStartTask").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("total").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("upgradingOS").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("creating").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("idle").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("offline").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("preempted").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("rebooting").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("reimaging").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("running").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("starting").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("startTaskFailed").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("leavingPool").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("unknown").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("unusable").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("waitingForStartTask").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("total").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("upgradingOS").ToString()); +} +]]> + + + +This sample shows how to call GetPoolNodeCounts and parse the result. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +foreach (BinaryData item in client.GetPoolNodeCounts(null, null, null, null, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("poolId").ToString()); +} +]]> +This sample shows how to call GetPoolNodeCounts with all parameters and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +foreach (BinaryData item in client.GetPoolNodeCounts(1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, "", null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("poolId").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("creating").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("idle").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("offline").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("preempted").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("rebooting").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("reimaging").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("running").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("starting").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("startTaskFailed").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("leavingPool").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("unknown").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("unusable").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("waitingForStartTask").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("total").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("upgradingOS").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("creating").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("idle").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("offline").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("preempted").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("rebooting").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("reimaging").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("running").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("starting").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("startTaskFailed").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("leavingPool").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("unknown").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("unusable").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("waitingForStartTask").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("total").ToString()); + 
Console.WriteLine(result.GetProperty("lowPriority").GetProperty("upgradingOS").ToString()); +} +]]> + + + +This sample shows how to call GetJobsAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BatchJob item in client.GetJobsAsync()) +{ +} +]]> +This sample shows how to call GetJobsAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BatchJob item in client.GetJobsAsync(timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, filter: "", select: new string[] { "" }, expand: new string[] { "" })) +{ +} +]]> + + + +This sample shows how to call GetJobsAsync and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BinaryData item in client.GetJobsAsync(null, null, null, null, null, null, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("poolInfo").ToString()); +} +]]> +This sample shows how to call GetJobsAsync with all parameters and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BinaryData item in client.GetJobsAsync(1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, "", new string[] { "" }, new string[] { "" }, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("usesTaskDependencies").ToString()); + Console.WriteLine(result.GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("eTag").ToString()); + Console.WriteLine(result.GetProperty("lastModified").ToString()); + Console.WriteLine(result.GetProperty("creationTime").ToString()); + Console.WriteLine(result.GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("stateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("previousState").ToString()); + Console.WriteLine(result.GetProperty("previousStateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("priority").ToString()); + Console.WriteLine(result.GetProperty("allowTaskPreemption").ToString()); + Console.WriteLine(result.GetProperty("maxParallelTasks").ToString()); + Console.WriteLine(result.GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + 
Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("filePattern").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("path").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("containerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("uploadHeaders")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("uploadHeaders")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("uploadOptions").GetProperty("uploadCondition").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("constraints").GetProperty("retentionTime").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); + 
Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("requiredSlots").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("killJobOnCompletion").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("runExclusive").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("applicationPackageReferences")[0].GetProperty("applicationId").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("applicationPackageReferences")[0].GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("authenticationTokenSettings").GetProperty("access")[0].ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("allowLowPriorityNode").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + 
Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("constraints").GetProperty("retentionTime").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("waitForSuccess").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("rerunOnNodeRebootAfterSuccess").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + 
Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("retentionTime").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("commonEnvironmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("commonEnvironmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("poolId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("autoPoolIdPrefix").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("poolLifetimeOption").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("keepAlive").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("vmSize").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("publisher").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("offer").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("sku").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("virtualMachineImageId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("exactVersion").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("nodeAgentSKUId").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("windowsConfiguration").GetProperty("enableAutomaticUpdates").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("lun").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("caching").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("diskSizeGB").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("storageAccountType").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("licenseType").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("type").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerImageNames")[0].ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("diskEncryptionConfiguration").GetProperty("targets")[0].ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("nodePlacementConfiguration").GetProperty("policy").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("name").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("publisher").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("type").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("typeHandlerVersion").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("autoUpgradeMinorVersion").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("enableAutomaticUpgrade").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("settings").GetProperty("").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("protectedSettings").GetProperty("").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("provisionAfterExtensions")[0].ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("ephemeralOSDiskSettings").GetProperty("placement").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("caching").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("diskSizeGB").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("managedDisk").GetProperty("storageAccountType").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("writeAcceleratorEnabled").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("encryptionAtHost").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("securityType").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("uefiSettings").GetProperty("secureBootEnabled").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("uefiSettings").GetProperty("vTpmEnabled").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("serviceArtifactReference").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("taskSlotsPerNode").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("taskSchedulingPolicy").GetProperty("nodeFillType").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("resizeTimeout").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("resourceTags").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("targetDedicatedNodes").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("targetLowPriorityNodes").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("enableAutoScale").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("autoScaleFormula").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("autoScaleEvaluationInterval").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("enableInterNodeCommunication").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("subnetId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("dynamicVNetAssignmentScope").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("protocol").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("backendPort").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("frontendPortRangeStart").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("frontendPortRangeEnd").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("priority").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("access").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("sourceAddressPrefix").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("sourcePortRanges")[0].ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("publicIPAddressConfiguration").GetProperty("provision").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("publicIPAddressConfiguration").GetProperty("ipAddressIds")[0].ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("enableAcceleratedNetworking").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("maxTaskRetryCount").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("waitForSuccess").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("applicationPackageReferences")[0].GetProperty("applicationId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("applicationPackageReferences")[0].GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("uid").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("gid").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("sshPrivateKey").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("windowsUserConfiguration").GetProperty("loginMode").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("metadata")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("metadata")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("accountName").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("containerName").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("accountKey").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("sasKey").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("blobfuseOptions").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("source").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("mountOptions").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("source").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("mountOptions").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("accountName").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("azureFileUrl").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("accountKey").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("mountOptions").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("targetNodeCommunicationMode").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("mode").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("disableAutomaticRollback").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("enableAutomaticOSUpgrade").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("useRollingUpgradePolicy").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("osRollingUpgradeDeferral").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("enableCrossZoneUpgrade").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxBatchInstancePercent").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxUnhealthyInstancePercent").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxUnhealthyUpgradedInstancePercent").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("pauseTimeBetweenBatches").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("prioritizeUnhealthyInstances").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("rollbackFailedInstancesOnPolicyBreach").ToString()); + Console.WriteLine(result.GetProperty("onAllTasksComplete").ToString()); + Console.WriteLine(result.GetProperty("onTaskFailure").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("subnetId").ToString()); + Console.WriteLine(result.GetProperty("metadata")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("metadata")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("endTime").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("poolId").ToString()); 
+ Console.WriteLine(result.GetProperty("executionInfo").GetProperty("schedulingError").GetProperty("category").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("schedulingError").GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("schedulingError").GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("schedulingError").GetProperty("details")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("schedulingError").GetProperty("details")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("terminateReason").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("lastUpdateTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("userCPUTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("kernelCPUTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("wallClockTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("readIOps").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("writeIOps").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("readIOGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("writeIOGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("numSucceededTasks").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("numFailedTasks").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("numTaskRetries").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("waitTime").ToString()); +} +]]> + + + +This sample shows how to call GetJobsFromSchedulesAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BatchJob item in client.GetJobsFromSchedulesAsync("")) +{ +} +]]> +This sample shows how to call GetJobsFromSchedulesAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BatchJob item in client.GetJobsFromSchedulesAsync("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, filter: "", select: new string[] { "" }, expand: new string[] { "" })) +{ +} +]]> + + + +This sample shows how to call GetJobsFromSchedulesAsync and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BinaryData item in client.GetJobsFromSchedulesAsync("", null, null, null, null, null, null, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("poolInfo").ToString()); +} +]]> +This sample shows how to call GetJobsFromSchedulesAsync with all parameters and parse the result. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BinaryData item in client.GetJobsFromSchedulesAsync("", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, "", new string[] { "" }, new string[] { "" }, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("usesTaskDependencies").ToString()); + Console.WriteLine(result.GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("eTag").ToString()); + Console.WriteLine(result.GetProperty("lastModified").ToString()); + Console.WriteLine(result.GetProperty("creationTime").ToString()); + Console.WriteLine(result.GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("stateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("previousState").ToString()); + Console.WriteLine(result.GetProperty("previousStateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("priority").ToString()); + Console.WriteLine(result.GetProperty("allowTaskPreemption").ToString()); + Console.WriteLine(result.GetProperty("maxParallelTasks").ToString()); + Console.WriteLine(result.GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + 
Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("filePattern").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("path").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("containerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("uploadHeaders")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("uploadHeaders")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("uploadOptions").GetProperty("uploadCondition").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("constraints").GetProperty("retentionTime").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("requiredSlots").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("killJobOnCompletion").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("runExclusive").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("applicationPackageReferences")[0].GetProperty("applicationId").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("applicationPackageReferences")[0].GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("authenticationTokenSettings").GetProperty("access")[0].ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("allowLowPriorityNode").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("id").ToString()); + 
Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("constraints").GetProperty("retentionTime").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("waitForSuccess").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("rerunOnNodeRebootAfterSuccess").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("id").ToString()); + 
Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("retentionTime").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("commonEnvironmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("commonEnvironmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("poolId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("autoPoolIdPrefix").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("poolLifetimeOption").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("keepAlive").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("vmSize").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("publisher").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("offer").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("sku").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("virtualMachineImageId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("exactVersion").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("nodeAgentSKUId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("windowsConfiguration").GetProperty("enableAutomaticUpdates").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("lun").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("caching").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("diskSizeGB").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("storageAccountType").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("licenseType").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("type").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerImageNames")[0].ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("diskEncryptionConfiguration").GetProperty("targets")[0].ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("nodePlacementConfiguration").GetProperty("policy").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("publisher").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("type").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("typeHandlerVersion").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("autoUpgradeMinorVersion").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("enableAutomaticUpgrade").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("settings").GetProperty("").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("protectedSettings").GetProperty("").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("provisionAfterExtensions")[0].ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("ephemeralOSDiskSettings").GetProperty("placement").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("caching").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("diskSizeGB").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("managedDisk").GetProperty("storageAccountType").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("writeAcceleratorEnabled").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("encryptionAtHost").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("securityType").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("uefiSettings").GetProperty("secureBootEnabled").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("uefiSettings").GetProperty("vTpmEnabled").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("serviceArtifactReference").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("taskSlotsPerNode").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("taskSchedulingPolicy").GetProperty("nodeFillType").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("resizeTimeout").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("resourceTags").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("targetDedicatedNodes").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("targetLowPriorityNodes").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("enableAutoScale").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("autoScaleFormula").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("autoScaleEvaluationInterval").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("enableInterNodeCommunication").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("subnetId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("dynamicVNetAssignmentScope").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("protocol").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("backendPort").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("frontendPortRangeStart").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("frontendPortRangeEnd").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("priority").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("access").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("sourceAddressPrefix").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("sourcePortRanges")[0].ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("publicIPAddressConfiguration").GetProperty("provision").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("publicIPAddressConfiguration").GetProperty("ipAddressIds")[0].ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("enableAcceleratedNetworking").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("waitForSuccess").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("applicationPackageReferences")[0].GetProperty("applicationId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("applicationPackageReferences")[0].GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("uid").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("gid").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("sshPrivateKey").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("windowsUserConfiguration").GetProperty("loginMode").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("metadata")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("metadata")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("accountName").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("containerName").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("accountKey").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("sasKey").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("blobfuseOptions").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("source").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("mountOptions").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("source").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("mountOptions").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("accountName").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("azureFileUrl").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("accountKey").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("mountOptions").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("targetNodeCommunicationMode").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("mode").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("disableAutomaticRollback").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("enableAutomaticOSUpgrade").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("useRollingUpgradePolicy").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("osRollingUpgradeDeferral").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("enableCrossZoneUpgrade").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxBatchInstancePercent").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxUnhealthyInstancePercent").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxUnhealthyUpgradedInstancePercent").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("pauseTimeBetweenBatches").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("prioritizeUnhealthyInstances").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("rollbackFailedInstancesOnPolicyBreach").ToString()); + Console.WriteLine(result.GetProperty("onAllTasksComplete").ToString()); + Console.WriteLine(result.GetProperty("onTaskFailure").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("subnetId").ToString()); + Console.WriteLine(result.GetProperty("metadata")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("metadata")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("endTime").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("poolId").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("schedulingError").GetProperty("category").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("schedulingError").GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("schedulingError").GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("schedulingError").GetProperty("details")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("schedulingError").GetProperty("details")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("terminateReason").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("lastUpdateTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("userCPUTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("kernelCPUTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("wallClockTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("readIOps").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("writeIOps").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("readIOGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("writeIOGiB").ToString()); + 
Console.WriteLine(result.GetProperty("stats").GetProperty("numSucceededTasks").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("numFailedTasks").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("numTaskRetries").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("waitTime").ToString()); +} +]]> + + + +This sample shows how to call GetJobPreparationAndReleaseTaskStatusesAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BatchJobPreparationAndReleaseTaskStatus item in client.GetJobPreparationAndReleaseTaskStatusesAsync("")) +{ +} +]]> +This sample shows how to call GetJobPreparationAndReleaseTaskStatusesAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BatchJobPreparationAndReleaseTaskStatus item in client.GetJobPreparationAndReleaseTaskStatusesAsync("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, filter: "", select: new string[] { "" })) +{ +} +]]> + + + +This sample shows how to call GetJobPreparationAndReleaseTaskStatusesAsync and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BinaryData item in client.GetJobPreparationAndReleaseTaskStatusesAsync("", null, null, null, null, null, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.ToString()); +} +]]> +This sample shows how to call GetJobPreparationAndReleaseTaskStatusesAsync with all parameters and parse the result. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BinaryData item in client.GetJobPreparationAndReleaseTaskStatusesAsync("", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, "", new string[] { "" }, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("poolId").ToString()); + Console.WriteLine(result.GetProperty("nodeId").ToString()); + Console.WriteLine(result.GetProperty("nodeUrl").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("endTime").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("taskRootDirectory").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("taskRootDirectoryUrl").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("exitCode").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("containerInfo").GetProperty("containerId").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("containerInfo").GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("containerInfo").GetProperty("error").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("failureInfo").GetProperty("category").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("failureInfo").GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("failureInfo").GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("failureInfo").GetProperty("details")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("failureInfo").GetProperty("details")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("retryCount").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("lastRetryTime").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("result").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTaskExecutionInfo").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTaskExecutionInfo").GetProperty("endTime").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTaskExecutionInfo").GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTaskExecutionInfo").GetProperty("taskRootDirectory").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTaskExecutionInfo").GetProperty("taskRootDirectoryUrl").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTaskExecutionInfo").GetProperty("exitCode").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTaskExecutionInfo").GetProperty("containerInfo").GetProperty("containerId").ToString()); + 
Console.WriteLine(result.GetProperty("jobReleaseTaskExecutionInfo").GetProperty("containerInfo").GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTaskExecutionInfo").GetProperty("containerInfo").GetProperty("error").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTaskExecutionInfo").GetProperty("failureInfo").GetProperty("category").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTaskExecutionInfo").GetProperty("failureInfo").GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTaskExecutionInfo").GetProperty("failureInfo").GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTaskExecutionInfo").GetProperty("failureInfo").GetProperty("details")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTaskExecutionInfo").GetProperty("failureInfo").GetProperty("details")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTaskExecutionInfo").GetProperty("result").ToString()); +} +]]> + + + +This sample shows how to call GetJobSchedulesAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BatchJobSchedule item in client.GetJobSchedulesAsync()) +{ +} +]]> +This sample shows how to call GetJobSchedulesAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BatchJobSchedule item in client.GetJobSchedulesAsync(timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, filter: "", select: new string[] { "" }, expand: new string[] { "" })) +{ +} +]]> + + + +This sample shows how to call GetJobSchedulesAsync and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BinaryData item in client.GetJobSchedulesAsync(null, null, null, null, null, null, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").ToString()); +} +]]> +This sample shows how to call GetJobSchedulesAsync with all parameters and parse the result. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BinaryData item in client.GetJobSchedulesAsync(1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, "", new string[] { "" }, new string[] { "" }, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("eTag").ToString()); + Console.WriteLine(result.GetProperty("lastModified").ToString()); + Console.WriteLine(result.GetProperty("creationTime").ToString()); + Console.WriteLine(result.GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("stateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("previousState").ToString()); + Console.WriteLine(result.GetProperty("previousStateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("schedule").GetProperty("doNotRunUntil").ToString()); + Console.WriteLine(result.GetProperty("schedule").GetProperty("doNotRunAfter").ToString()); + Console.WriteLine(result.GetProperty("schedule").GetProperty("startWindow").ToString()); + Console.WriteLine(result.GetProperty("schedule").GetProperty("recurrenceInterval").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("priority").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("allowTaskPreemption").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("maxParallelTasks").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("usesTaskDependencies").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("onAllTasksComplete").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("onTaskFailure").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("networkConfiguration").GetProperty("subnetId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("filePattern").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("path").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("containerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("uploadHeaders")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("uploadHeaders")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("uploadOptions").GetProperty("uploadCondition").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("constraints").GetProperty("retentionTime").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("requiredSlots").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("killJobOnCompletion").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("runExclusive").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("applicationPackageReferences")[0].GetProperty("applicationId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("applicationPackageReferences")[0].GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("authenticationTokenSettings").GetProperty("access")[0].ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("allowLowPriorityNode").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("constraints").GetProperty("retentionTime").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("waitForSuccess").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("rerunOnNodeRebootAfterSuccess").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("commandLine").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("retentionTime").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("commonEnvironmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("commonEnvironmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("poolId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("autoPoolIdPrefix").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("poolLifetimeOption").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("keepAlive").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("vmSize").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("publisher").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("offer").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("sku").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("virtualMachineImageId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("exactVersion").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("nodeAgentSKUId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("windowsConfiguration").GetProperty("enableAutomaticUpdates").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("lun").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("caching").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("diskSizeGB").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("storageAccountType").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("licenseType").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("type").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerImageNames")[0].ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("diskEncryptionConfiguration").GetProperty("targets")[0].ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("nodePlacementConfiguration").GetProperty("policy").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("name").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("publisher").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("type").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("typeHandlerVersion").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("autoUpgradeMinorVersion").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("enableAutomaticUpgrade").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("settings").GetProperty("").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("protectedSettings").GetProperty("").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("provisionAfterExtensions")[0].ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("ephemeralOSDiskSettings").GetProperty("placement").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("caching").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("diskSizeGB").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("managedDisk").GetProperty("storageAccountType").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("writeAcceleratorEnabled").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("encryptionAtHost").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("securityType").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("uefiSettings").GetProperty("secureBootEnabled").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("uefiSettings").GetProperty("vTpmEnabled").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("serviceArtifactReference").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("taskSlotsPerNode").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("taskSchedulingPolicy").GetProperty("nodeFillType").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("resizeTimeout").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("resourceTags").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("targetDedicatedNodes").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("targetLowPriorityNodes").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("enableAutoScale").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("autoScaleFormula").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("autoScaleEvaluationInterval").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("enableInterNodeCommunication").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("subnetId").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("dynamicVNetAssignmentScope").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("protocol").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("backendPort").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("frontendPortRangeStart").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("frontendPortRangeEnd").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("priority").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("access").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("sourceAddressPrefix").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("sourcePortRanges")[0].ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("publicIPAddressConfiguration").GetProperty("provision").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("publicIPAddressConfiguration").GetProperty("ipAddressIds")[0].ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("enableAcceleratedNetworking").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("waitForSuccess").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("applicationPackageReferences")[0].GetProperty("applicationId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("applicationPackageReferences")[0].GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("uid").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("gid").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("sshPrivateKey").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("windowsUserConfiguration").GetProperty("loginMode").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("metadata")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("metadata")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("accountName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("containerName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("accountKey").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("sasKey").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("blobfuseOptions").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("source").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("relativeMountPath").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("mountOptions").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("source").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("mountOptions").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("accountName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("azureFileUrl").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("accountKey").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("mountOptions").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("targetNodeCommunicationMode").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("mode").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("disableAutomaticRollback").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("enableAutomaticOSUpgrade").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("useRollingUpgradePolicy").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("osRollingUpgradeDeferral").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("enableCrossZoneUpgrade").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxBatchInstancePercent").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxUnhealthyInstancePercent").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxUnhealthyUpgradedInstancePercent").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("pauseTimeBetweenBatches").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("prioritizeUnhealthyInstances").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("rollbackFailedInstancesOnPolicyBreach").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("metadata")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("metadata")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("nextRunTime").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("recentJob").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("recentJob").GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("endTime").ToString()); + Console.WriteLine(result.GetProperty("metadata")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("metadata")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("url").ToString()); + 
Console.WriteLine(result.GetProperty("stats").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("lastUpdateTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("userCPUTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("kernelCPUTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("wallClockTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("readIOps").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("writeIOps").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("readIOGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("writeIOGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("numSucceededTasks").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("numFailedTasks").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("numTaskRetries").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("waitTime").ToString()); +} +]]> + + + +This sample shows how to call GetTasksAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BatchTask item in client.GetTasksAsync("")) +{ +} +]]> +This sample shows how to call GetTasksAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BatchTask item in client.GetTasksAsync("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, filter: "", select: new string[] { "" }, expand: new string[] { "" })) +{ +} +]]> + + + +This sample shows how to call GetTasksAsync and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BinaryData item in client.GetTasksAsync("", null, null, null, null, null, null, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.ToString()); +} +]]> +This sample shows how to call GetTasksAsync with all parameters and parse the result. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BinaryData item in client.GetTasksAsync("", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, "", new string[] { "" }, new string[] { "" }, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("eTag").ToString()); + Console.WriteLine(result.GetProperty("lastModified").ToString()); + Console.WriteLine(result.GetProperty("creationTime").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("exitCodes")[0].GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("exitCodes")[0].GetProperty("exitOptions").GetProperty("jobAction").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("exitCodes")[0].GetProperty("exitOptions").GetProperty("dependencyAction").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("exitCodeRanges")[0].GetProperty("start").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("exitCodeRanges")[0].GetProperty("end").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("exitCodeRanges")[0].GetProperty("exitOptions").GetProperty("jobAction").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("exitCodeRanges")[0].GetProperty("exitOptions").GetProperty("dependencyAction").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("preProcessingError").GetProperty("jobAction").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("preProcessingError").GetProperty("dependencyAction").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("fileUploadError").GetProperty("jobAction").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("fileUploadError").GetProperty("dependencyAction").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("default").GetProperty("jobAction").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("default").GetProperty("dependencyAction").ToString()); + Console.WriteLine(result.GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("stateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("previousState").ToString()); + Console.WriteLine(result.GetProperty("previousStateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + 
Console.WriteLine(result.GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("outputFiles")[0].GetProperty("filePattern").ToString()); + Console.WriteLine(result.GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("path").ToString()); + Console.WriteLine(result.GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("containerUrl").ToString()); + Console.WriteLine(result.GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("uploadHeaders")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("uploadHeaders")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("outputFiles")[0].GetProperty("uploadOptions").GetProperty("uploadCondition").ToString()); + Console.WriteLine(result.GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("affinityInfo").GetProperty("affinityId").ToString()); + Console.WriteLine(result.GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("constraints").GetProperty("retentionTime").ToString()); + Console.WriteLine(result.GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("requiredSlots").ToString()); + Console.WriteLine(result.GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("endTime").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("exitCode").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("containerInfo").GetProperty("containerId").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("containerInfo").GetProperty("state").ToString()); + 
Console.WriteLine(result.GetProperty("executionInfo").GetProperty("containerInfo").GetProperty("error").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("category").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("details")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("details")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("retryCount").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("lastRetryTime").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("requeueCount").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("lastRequeueTime").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("result").ToString()); + Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("affinityId").ToString()); + Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("nodeUrl").ToString()); + Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("poolId").ToString()); + Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("nodeId").ToString()); + Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("taskRootDirectory").ToString()); + Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("taskRootDirectoryUrl").ToString()); + Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("numberOfInstances").ToString()); + Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("coordinationCommandLine").ToString()); + Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("commonResourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("commonResourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("commonResourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("commonResourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("commonResourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("commonResourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("commonResourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("lastUpdateTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("userCPUTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("kernelCPUTime").ToString()); + 
Console.WriteLine(result.GetProperty("stats").GetProperty("wallClockTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("readIOps").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("writeIOps").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("readIOGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("writeIOGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("waitTime").ToString()); + Console.WriteLine(result.GetProperty("dependsOn").GetProperty("taskIds")[0].ToString()); + Console.WriteLine(result.GetProperty("dependsOn").GetProperty("taskIdRanges")[0].GetProperty("start").ToString()); + Console.WriteLine(result.GetProperty("dependsOn").GetProperty("taskIdRanges")[0].GetProperty("end").ToString()); + Console.WriteLine(result.GetProperty("applicationPackageReferences")[0].GetProperty("applicationId").ToString()); + Console.WriteLine(result.GetProperty("applicationPackageReferences")[0].GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("authenticationTokenSettings").GetProperty("access")[0].ToString()); +} +]]> + + + +This sample shows how to call GetSubTasksAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BatchSubtask item in client.GetSubTasksAsync("", "")) +{ +} +]]> +This sample shows how to call GetSubTasksAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BatchSubtask item in client.GetSubTasksAsync("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), select: new string[] { "" })) +{ +} +]]> + + + +This sample shows how to call GetSubTasksAsync and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BinaryData item in client.GetSubTasksAsync("", "", null, null, null, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.ToString()); +} +]]> +This sample shows how to call GetSubTasksAsync with all parameters and parse the result. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BinaryData item in client.GetSubTasksAsync("", "", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), new string[] { "" }, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("affinityId").ToString()); + Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("nodeUrl").ToString()); + Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("poolId").ToString()); + Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("nodeId").ToString()); + Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("taskRootDirectory").ToString()); + Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("taskRootDirectoryUrl").ToString()); + Console.WriteLine(result.GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("endTime").ToString()); + Console.WriteLine(result.GetProperty("exitCode").ToString()); + Console.WriteLine(result.GetProperty("containerInfo").GetProperty("containerId").ToString()); + Console.WriteLine(result.GetProperty("containerInfo").GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("containerInfo").GetProperty("error").ToString()); + Console.WriteLine(result.GetProperty("failureInfo").GetProperty("category").ToString()); + Console.WriteLine(result.GetProperty("failureInfo").GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("failureInfo").GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("failureInfo").GetProperty("details")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("failureInfo").GetProperty("details")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("stateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("previousState").ToString()); + Console.WriteLine(result.GetProperty("previousStateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("result").ToString()); +} +]]> + + + +This sample shows how to call GetTaskFilesAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BatchNodeFile item in client.GetTaskFilesAsync("", "")) +{ +} +]]> +This sample shows how to call GetTaskFilesAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BatchNodeFile item in client.GetTaskFilesAsync("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, filter: "", recursive: true)) +{ +} +]]> + + + +This sample shows how to call GetTaskFiles. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +foreach (BatchNodeFile item in client.GetTaskFiles("", "")) +{ +} +]]> +This sample shows how to call GetTaskFiles with all parameters. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +foreach (BatchNodeFile item in client.GetTaskFiles("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, filter: "", recursive: true)) +{ +} +]]> + + + +This sample shows how to call GetTaskFilesAsync and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BinaryData item in client.GetTaskFilesAsync("", "", null, null, null, null, null, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.ToString()); +} +]]> +This sample shows how to call GetTaskFilesAsync with all parameters and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BinaryData item in client.GetTaskFilesAsync("", "", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, "", true, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("isDirectory").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("creationTime").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("lastModified").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("contentLength").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("contentType").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("fileMode").ToString()); +} +]]> + + + +This sample shows how to call GetTaskFiles and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +foreach (BinaryData item in client.GetTaskFiles("", "", null, null, null, null, null, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.ToString()); +} +]]> +This sample shows how to call GetTaskFiles with all parameters and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +foreach (BinaryData item in client.GetTaskFiles("", "", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, "", true, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("isDirectory").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("creationTime").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("lastModified").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("contentLength").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("contentType").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("fileMode").ToString()); +} +]]> + + + +This sample shows how to call GetNodesAsync. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BatchNode item in client.GetNodesAsync("")) +{ +} +]]> +This sample shows how to call GetNodesAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BatchNode item in client.GetNodesAsync("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, filter: "", select: new string[] { "" })) +{ +} +]]> + + + +This sample shows how to call GetNodesAsync and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BinaryData item in client.GetNodesAsync("", null, null, null, null, null, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.ToString()); +} +]]> +This sample shows how to call GetNodesAsync with all parameters and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BinaryData item in client.GetNodesAsync("", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, "", new string[] { "" }, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("schedulingState").ToString()); + Console.WriteLine(result.GetProperty("stateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("lastBootTime").ToString()); + Console.WriteLine(result.GetProperty("allocationTime").ToString()); + Console.WriteLine(result.GetProperty("ipAddress").ToString()); + Console.WriteLine(result.GetProperty("affinityId").ToString()); + Console.WriteLine(result.GetProperty("vmSize").ToString()); + Console.WriteLine(result.GetProperty("totalTasksRun").ToString()); + Console.WriteLine(result.GetProperty("runningTasksCount").ToString()); + Console.WriteLine(result.GetProperty("runningTaskSlotsCount").ToString()); + Console.WriteLine(result.GetProperty("totalTasksSucceeded").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("taskUrl").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("jobId").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("taskId").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("subtaskId").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("taskState").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("endTime").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("exitCode").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("containerInfo").GetProperty("containerId").ToString()); + 
+    Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("containerInfo").GetProperty("state").ToString());
+    Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("containerInfo").GetProperty("error").ToString());
+    Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("category").ToString());
+    Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("code").ToString());
+    Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("message").ToString());
+    Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("details")[0].GetProperty("name").ToString());
+    Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("details")[0].GetProperty("value").ToString());
+    Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("retryCount").ToString());
+    Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("lastRetryTime").ToString());
+    Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("requeueCount").ToString());
+    Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("lastRequeueTime").ToString());
+    Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("result").ToString());
+    Console.WriteLine(result.GetProperty("startTask").GetProperty("commandLine").ToString());
+    Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString());
+    Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("imageName").ToString());
+    Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString());
+    Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString());
+    Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString());
+    Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString());
+    Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString());
+    Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString());
+    Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString());
+    Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString());
+    Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString());
+    Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString());
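+    // A resourceFiles entry populates exactly one source (autoStorageContainerName,
+    // storageContainerUrl, or httpUrl); the other source properties will be absent.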
+    Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString());
+    Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString());
+    Console.WriteLine(result.GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString());
+    Console.WriteLine(result.GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString());
+    Console.WriteLine(result.GetProperty("startTask").GetProperty("userIdentity").GetProperty("username").ToString());
+    Console.WriteLine(result.GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString());
+    Console.WriteLine(result.GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString());
+    Console.WriteLine(result.GetProperty("startTask").GetProperty("maxTaskRetryCount").ToString());
+    Console.WriteLine(result.GetProperty("startTask").GetProperty("waitForSuccess").ToString());
+    Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("state").ToString());
+    Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("startTime").ToString());
+    Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("endTime").ToString());
+    Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("exitCode").ToString());
+    Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("containerInfo").GetProperty("containerId").ToString());
+    Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("containerInfo").GetProperty("state").ToString());
+    Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("containerInfo").GetProperty("error").ToString());
+    Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("failureInfo").GetProperty("category").ToString());
+    Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("failureInfo").GetProperty("code").ToString());
+    Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("failureInfo").GetProperty("message").ToString());
+    Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("failureInfo").GetProperty("details")[0].GetProperty("name").ToString());
+    Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("failureInfo").GetProperty("details")[0].GetProperty("value").ToString());
+    Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("retryCount").ToString());
+    Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("lastRetryTime").ToString());
+    Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("result").ToString());
+    Console.WriteLine(result.GetProperty("errors")[0].GetProperty("code").ToString());
+    Console.WriteLine(result.GetProperty("errors")[0].GetProperty("message").ToString());
+    Console.WriteLine(result.GetProperty("errors")[0].GetProperty("errorDetails")[0].GetProperty("name").ToString());
+    Console.WriteLine(result.GetProperty("errors")[0].GetProperty("errorDetails")[0].GetProperty("value").ToString());
+    Console.WriteLine(result.GetProperty("isDedicated").ToString());
+    Console.WriteLine(result.GetProperty("endpointConfiguration").GetProperty("inboundEndpoints")[0].GetProperty("name").ToString());
+    Console.WriteLine(result.GetProperty("endpointConfiguration").GetProperty("inboundEndpoints")[0].GetProperty("protocol").ToString());
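+    // endpointConfiguration (and its inboundEndpoints) is typically present only
+    // when the node's pool defines inbound NAT pools in its network configuration.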
+    Console.WriteLine(result.GetProperty("endpointConfiguration").GetProperty("inboundEndpoints")[0].GetProperty("publicIPAddress").ToString());
+    Console.WriteLine(result.GetProperty("endpointConfiguration").GetProperty("inboundEndpoints")[0].GetProperty("publicFQDN").ToString());
+    Console.WriteLine(result.GetProperty("endpointConfiguration").GetProperty("inboundEndpoints")[0].GetProperty("frontendPort").ToString());
+    Console.WriteLine(result.GetProperty("endpointConfiguration").GetProperty("inboundEndpoints")[0].GetProperty("backendPort").ToString());
+    Console.WriteLine(result.GetProperty("nodeAgentInfo").GetProperty("version").ToString());
+    Console.WriteLine(result.GetProperty("nodeAgentInfo").GetProperty("lastUpdateTime").ToString());
+    Console.WriteLine(result.GetProperty("virtualMachineInfo").GetProperty("imageReference").GetProperty("publisher").ToString());
+    Console.WriteLine(result.GetProperty("virtualMachineInfo").GetProperty("imageReference").GetProperty("offer").ToString());
+    Console.WriteLine(result.GetProperty("virtualMachineInfo").GetProperty("imageReference").GetProperty("sku").ToString());
+    Console.WriteLine(result.GetProperty("virtualMachineInfo").GetProperty("imageReference").GetProperty("version").ToString());
+    Console.WriteLine(result.GetProperty("virtualMachineInfo").GetProperty("imageReference").GetProperty("virtualMachineImageId").ToString());
+    Console.WriteLine(result.GetProperty("virtualMachineInfo").GetProperty("imageReference").GetProperty("exactVersion").ToString());
+    Console.WriteLine(result.GetProperty("virtualMachineInfo").GetProperty("scaleSetVmResourceId").ToString());
+}
+]]>
+
+
+
+This sample shows how to call GetNodeExtensionsAsync.
+Uri endpoint = new Uri("<https://my-service.azure.com>");
+TokenCredential credential = new DefaultAzureCredential();
+BatchClient client = new BatchClient(endpoint, credential);
+
+await foreach (BatchNodeVMExtension item in client.GetNodeExtensionsAsync("", ""))
+{
+}
+]]>
+This sample shows how to call GetNodeExtensionsAsync with all parameters.
+Uri endpoint = new Uri("<https://my-service.azure.com>");
+TokenCredential credential = new DefaultAzureCredential();
+BatchClient client = new BatchClient(endpoint, credential);
+
+await foreach (BatchNodeVMExtension item in client.GetNodeExtensionsAsync("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, select: new string[] { "" }))
+{
+}
+]]>
+
+
+
+This sample shows how to call GetNodeExtensionsAsync and parse the result.
+Uri endpoint = new Uri("<https://my-service.azure.com>");
+TokenCredential credential = new DefaultAzureCredential();
+BatchClient client = new BatchClient(endpoint, credential);
+
+await foreach (BinaryData item in client.GetNodeExtensionsAsync("", "", null, null, null, null, null))
+{
+    JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement;
+    Console.WriteLine(result.ToString());
+}
+]]>
+This sample shows how to call GetNodeExtensionsAsync with all parameters and parse the result.
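+// In the generated samples, empty strings ("") mark caller-supplied values such
+// as the pool ID, node ID, and OData $filter/$select expressions.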
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BinaryData item in client.GetNodeExtensionsAsync("", "", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, new string[] { "" }, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("provisioningState").ToString()); + Console.WriteLine(result.GetProperty("vmExtension").GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("vmExtension").GetProperty("publisher").ToString()); + Console.WriteLine(result.GetProperty("vmExtension").GetProperty("type").ToString()); + Console.WriteLine(result.GetProperty("vmExtension").GetProperty("typeHandlerVersion").ToString()); + Console.WriteLine(result.GetProperty("vmExtension").GetProperty("autoUpgradeMinorVersion").ToString()); + Console.WriteLine(result.GetProperty("vmExtension").GetProperty("enableAutomaticUpgrade").ToString()); + Console.WriteLine(result.GetProperty("vmExtension").GetProperty("settings").GetProperty("").ToString()); + Console.WriteLine(result.GetProperty("vmExtension").GetProperty("protectedSettings").GetProperty("").ToString()); + Console.WriteLine(result.GetProperty("vmExtension").GetProperty("provisionAfterExtensions")[0].ToString()); + Console.WriteLine(result.GetProperty("instanceView").GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("instanceView").GetProperty("statuses")[0].GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("instanceView").GetProperty("statuses")[0].GetProperty("displayStatus").ToString()); + Console.WriteLine(result.GetProperty("instanceView").GetProperty("statuses")[0].GetProperty("level").ToString()); + Console.WriteLine(result.GetProperty("instanceView").GetProperty("statuses")[0].GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("instanceView").GetProperty("statuses")[0].GetProperty("time").ToString()); + Console.WriteLine(result.GetProperty("instanceView").GetProperty("subStatuses")[0].GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("instanceView").GetProperty("subStatuses")[0].GetProperty("displayStatus").ToString()); + Console.WriteLine(result.GetProperty("instanceView").GetProperty("subStatuses")[0].GetProperty("level").ToString()); + Console.WriteLine(result.GetProperty("instanceView").GetProperty("subStatuses")[0].GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("instanceView").GetProperty("subStatuses")[0].GetProperty("time").ToString()); +} +]]> + + + +This sample shows how to call GetNodeFilesAsync. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BatchNodeFile item in client.GetNodeFilesAsync("", "")) +{ +} +]]> +This sample shows how to call GetNodeFilesAsync with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BatchNodeFile item in client.GetNodeFilesAsync("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, filter: "", recursive: true)) +{ +} +]]> + + + +This sample shows how to call GetNodeFiles. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +foreach (BatchNodeFile item in client.GetNodeFiles("", "")) +{ +} +]]> +This sample shows how to call GetNodeFiles with all parameters. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +foreach (BatchNodeFile item in client.GetNodeFiles("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, filter: "", recursive: true)) +{ +} +]]> + + + +This sample shows how to call GetNodeFilesAsync and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BinaryData item in client.GetNodeFilesAsync("", "", null, null, null, null, null, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.ToString()); +} +]]> +This sample shows how to call GetNodeFilesAsync with all parameters and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +await foreach (BinaryData item in client.GetNodeFilesAsync("", "", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, "", true, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("isDirectory").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("creationTime").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("lastModified").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("contentLength").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("contentType").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("fileMode").ToString()); +} +]]> + + + +This sample shows how to call GetNodeFiles and parse the result. +"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +foreach (BinaryData item in client.GetNodeFiles("", "", null, null, null, null, null, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.ToString()); +} +]]> +This sample shows how to call GetNodeFiles with all parameters and parse the result. 
+"); +TokenCredential credential = new DefaultAzureCredential(); +BatchClient client = new BatchClient(endpoint, credential); + +foreach (BinaryData item in client.GetNodeFiles("", "", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, "", true, null)) +{ + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("isDirectory").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("creationTime").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("lastModified").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("contentLength").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("contentType").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("fileMode").ToString()); +} +]]> + + + \ No newline at end of file diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/DynamicVNetAssignmentScope.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/DynamicVNetAssignmentScope.cs new file mode 100644 index 0000000000000..d67c921b202cb --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/DynamicVNetAssignmentScope.cs @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// DynamicVNetAssignmentScope enums. + public readonly partial struct DynamicVNetAssignmentScope : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public DynamicVNetAssignmentScope(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string NoneValue = "none"; + private const string JobValue = "job"; + + /// No dynamic VNet assignment is enabled. + public static DynamicVNetAssignmentScope None { get; } = new DynamicVNetAssignmentScope(NoneValue); + /// Dynamic VNet assignment is done per-job. + public static DynamicVNetAssignmentScope Job { get; } = new DynamicVNetAssignmentScope(JobValue); + /// Determines if two values are the same. + public static bool operator ==(DynamicVNetAssignmentScope left, DynamicVNetAssignmentScope right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(DynamicVNetAssignmentScope left, DynamicVNetAssignmentScope right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator DynamicVNetAssignmentScope(string value) => new DynamicVNetAssignmentScope(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is DynamicVNetAssignmentScope other && Equals(other); + /// + public bool Equals(DynamicVNetAssignmentScope other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ElevationLevel.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ElevationLevel.cs new file mode 100644 index 0000000000000..6840f3956c689 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ElevationLevel.cs @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// ElevationLevel enums. + public readonly partial struct ElevationLevel : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ElevationLevel(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string NonAdminValue = "nonadmin"; + private const string AdminValue = "admin"; + + /// The user is a standard user without elevated access. + public static ElevationLevel NonAdmin { get; } = new ElevationLevel(NonAdminValue); + /// The user is a user with elevated access and operates with full Administrator permissions. + public static ElevationLevel Admin { get; } = new ElevationLevel(AdminValue); + /// Determines if two values are the same. + public static bool operator ==(ElevationLevel left, ElevationLevel right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(ElevationLevel left, ElevationLevel right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ElevationLevel(string value) => new ElevationLevel(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ElevationLevel other && Equals(other); + /// + public bool Equals(ElevationLevel other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/EnvironmentSetting.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/EnvironmentSetting.Serialization.cs new file mode 100644 index 0000000000000..5394104578495 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/EnvironmentSetting.Serialization.cs @@ -0,0 +1,146 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class EnvironmentSetting : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(EnvironmentSetting)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("name"u8); + writer.WriteStringValue(Name); + if (Optional.IsDefined(Value)) + { + writer.WritePropertyName("value"u8); + writer.WriteStringValue(Value); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + EnvironmentSetting IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(EnvironmentSetting)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeEnvironmentSetting(document.RootElement, options); + } + + internal static EnvironmentSetting DeserializeEnvironmentSetting(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string name = default; + string value = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("name"u8)) + { + name = property.Value.GetString(); + continue; + } + if (property.NameEquals("value"u8)) + { + value = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new EnvironmentSetting(name, value, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(EnvironmentSetting)} does not support writing '{options.Format}' format."); + } + } + + EnvironmentSetting IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeEnvironmentSetting(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(EnvironmentSetting)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. 
+ internal static EnvironmentSetting FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeEnvironmentSetting(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/EnvironmentSetting.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/EnvironmentSetting.cs new file mode 100644 index 0000000000000..bde5afca2aa79 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/EnvironmentSetting.cs @@ -0,0 +1,79 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// An environment variable to be set on a Task process. + public partial class EnvironmentSetting + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The name of the environment variable. + /// is null. + public EnvironmentSetting(string name) + { + Argument.AssertNotNull(name, nameof(name)); + + Name = name; + } + + /// Initializes a new instance of . + /// The name of the environment variable. + /// The value of the environment variable. + /// Keeps track of any properties unknown to the library. + internal EnvironmentSetting(string name, string value, IDictionary serializedAdditionalRawData) + { + Name = name; + Value = value; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal EnvironmentSetting() + { + } + + /// The name of the environment variable. + public string Name { get; set; } + /// The value of the environment variable. + public string Value { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ErrorCategory.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ErrorCategory.cs new file mode 100644 index 0000000000000..438725520fe46 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ErrorCategory.cs @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// ErrorCategory enums. + public readonly partial struct ErrorCategory : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ErrorCategory(string value) + { + _value = value ?? 
throw new ArgumentNullException(nameof(value)); + } + + private const string UserErrorValue = "usererror"; + private const string ServerErrorValue = "servererror"; + + /// The error is due to a user issue, such as misconfiguration. + public static ErrorCategory UserError { get; } = new ErrorCategory(UserErrorValue); + /// The error is due to an internal server issue. + public static ErrorCategory ServerError { get; } = new ErrorCategory(ServerErrorValue); + /// Determines if two values are the same. + public static bool operator ==(ErrorCategory left, ErrorCategory right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(ErrorCategory left, ErrorCategory right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ErrorCategory(string value) => new ErrorCategory(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ErrorCategory other && Equals(other); + /// + public bool Equals(ErrorCategory other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ExitCodeMapping.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ExitCodeMapping.Serialization.cs new file mode 100644 index 0000000000000..9ec340db7bde9 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ExitCodeMapping.Serialization.cs @@ -0,0 +1,143 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class ExitCodeMapping : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ExitCodeMapping)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("code"u8); + writer.WriteNumberValue(Code); + writer.WritePropertyName("exitOptions"u8); + writer.WriteObjectValue(ExitOptions, options); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ExitCodeMapping IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ExitCodeMapping)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeExitCodeMapping(document.RootElement, options); + } + + internal static ExitCodeMapping DeserializeExitCodeMapping(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + int code = default; + ExitOptions exitOptions = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("code"u8)) + { + code = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("exitOptions"u8)) + { + exitOptions = ExitOptions.DeserializeExitOptions(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new ExitCodeMapping(code, exitOptions, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ExitCodeMapping)} does not support writing '{options.Format}' format."); + } + } + + ExitCodeMapping IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeExitCodeMapping(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ExitCodeMapping)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static ExitCodeMapping FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeExitCodeMapping(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ExitCodeMapping.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ExitCodeMapping.cs new file mode 100644 index 0000000000000..8f7079c931814 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ExitCodeMapping.cs @@ -0,0 +1,84 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// + /// How the Batch service should respond if a Task exits with a particular exit + /// code. 
+ /// + public partial class ExitCodeMapping + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// A process exit code. + /// How the Batch service should respond if the Task exits with this exit code. + /// is null. + public ExitCodeMapping(int code, ExitOptions exitOptions) + { + Argument.AssertNotNull(exitOptions, nameof(exitOptions)); + + Code = code; + ExitOptions = exitOptions; + } + + /// Initializes a new instance of . + /// A process exit code. + /// How the Batch service should respond if the Task exits with this exit code. + /// Keeps track of any properties unknown to the library. + internal ExitCodeMapping(int code, ExitOptions exitOptions, IDictionary serializedAdditionalRawData) + { + Code = code; + ExitOptions = exitOptions; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ExitCodeMapping() + { + } + + /// A process exit code. + public int Code { get; set; } + /// How the Batch service should respond if the Task exits with this exit code. + public ExitOptions ExitOptions { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ExitCodeRangeMapping.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ExitCodeRangeMapping.Serialization.cs new file mode 100644 index 0000000000000..d7ee046886d24 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ExitCodeRangeMapping.Serialization.cs @@ -0,0 +1,151 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class ExitCodeRangeMapping : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ExitCodeRangeMapping)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("start"u8); + writer.WriteNumberValue(Start); + writer.WritePropertyName("end"u8); + writer.WriteNumberValue(End); + writer.WritePropertyName("exitOptions"u8); + writer.WriteObjectValue(ExitOptions, options); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ExitCodeRangeMapping IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ExitCodeRangeMapping)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeExitCodeRangeMapping(document.RootElement, options); + } + + internal static ExitCodeRangeMapping DeserializeExitCodeRangeMapping(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + int start = default; + int end = default; + ExitOptions exitOptions = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("start"u8)) + { + start = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("end"u8)) + { + end = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("exitOptions"u8)) + { + exitOptions = ExitOptions.DeserializeExitOptions(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new ExitCodeRangeMapping(start, end, exitOptions, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ExitCodeRangeMapping)} does not support writing '{options.Format}' format."); + } + } + + ExitCodeRangeMapping IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeExitCodeRangeMapping(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ExitCodeRangeMapping)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static ExitCodeRangeMapping FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeExitCodeRangeMapping(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ExitCodeRangeMapping.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ExitCodeRangeMapping.cs new file mode 100644 index 0000000000000..62ff19d384397 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ExitCodeRangeMapping.cs @@ -0,0 +1,90 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// + /// A range of exit codes and how the Batch service should respond to exit codes + /// within that range. + /// + public partial class ExitCodeRangeMapping + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The first exit code in the range. + /// The last exit code in the range. + /// How the Batch service should respond if the Task exits with an exit code in the range start to end (inclusive). + /// is null. + public ExitCodeRangeMapping(int start, int end, ExitOptions exitOptions) + { + Argument.AssertNotNull(exitOptions, nameof(exitOptions)); + + Start = start; + End = end; + ExitOptions = exitOptions; + } + + /// Initializes a new instance of . + /// The first exit code in the range. + /// The last exit code in the range. + /// How the Batch service should respond if the Task exits with an exit code in the range start to end (inclusive). + /// Keeps track of any properties unknown to the library. + internal ExitCodeRangeMapping(int start, int end, ExitOptions exitOptions, IDictionary serializedAdditionalRawData) + { + Start = start; + End = end; + ExitOptions = exitOptions; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. 
+ internal ExitCodeRangeMapping() + { + } + + /// The first exit code in the range. + public int Start { get; set; } + /// The last exit code in the range. + public int End { get; set; } + /// How the Batch service should respond if the Task exits with an exit code in the range start to end (inclusive). + public ExitOptions ExitOptions { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ExitConditions.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ExitConditions.Serialization.cs new file mode 100644 index 0000000000000..68222969f5684 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ExitConditions.Serialization.cs @@ -0,0 +1,228 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class ExitConditions : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ExitConditions)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsCollectionDefined(ExitCodes)) + { + writer.WritePropertyName("exitCodes"u8); + writer.WriteStartArray(); + foreach (var item in ExitCodes) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (Optional.IsCollectionDefined(ExitCodeRanges)) + { + writer.WritePropertyName("exitCodeRanges"u8); + writer.WriteStartArray(); + foreach (var item in ExitCodeRanges) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (Optional.IsDefined(PreProcessingError)) + { + writer.WritePropertyName("preProcessingError"u8); + writer.WriteObjectValue(PreProcessingError, options); + } + if (Optional.IsDefined(FileUploadError)) + { + writer.WritePropertyName("fileUploadError"u8); + writer.WriteObjectValue(FileUploadError, options); + } + if (Optional.IsDefined(Default)) + { + writer.WritePropertyName("default"u8); + writer.WriteObjectValue(Default, options); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ExitConditions IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ExitConditions)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeExitConditions(document.RootElement, options); + } + + internal static ExitConditions DeserializeExitConditions(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + IList exitCodes = default; + IList exitCodeRanges = default; + ExitOptions preProcessingError = default; + ExitOptions fileUploadError = default; + ExitOptions @default = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("exitCodes"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(ExitCodeMapping.DeserializeExitCodeMapping(item, options)); + } + exitCodes = array; + continue; + } + if (property.NameEquals("exitCodeRanges"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(ExitCodeRangeMapping.DeserializeExitCodeRangeMapping(item, options)); + } + exitCodeRanges = array; + continue; + } + if (property.NameEquals("preProcessingError"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + preProcessingError = ExitOptions.DeserializeExitOptions(property.Value, options); + continue; + } + if (property.NameEquals("fileUploadError"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + fileUploadError = ExitOptions.DeserializeExitOptions(property.Value, options); + continue; + } + if (property.NameEquals("default"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + @default = ExitOptions.DeserializeExitOptions(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new ExitConditions( + exitCodes ?? new ChangeTrackingList(), + exitCodeRanges ?? new ChangeTrackingList(), + preProcessingError, + fileUploadError, + @default, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ExitConditions)} does not support writing '{options.Format}' format."); + } + } + + ExitConditions IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeExitConditions(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ExitConditions)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static ExitConditions FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeExitConditions(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ExitConditions.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ExitConditions.cs new file mode 100644 index 0000000000000..7e09b68f12a72 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ExitConditions.cs @@ -0,0 +1,83 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Specifies how the Batch service should respond when the Task completes. + public partial class ExitConditions + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public ExitConditions() + { + ExitCodes = new ChangeTrackingList(); + ExitCodeRanges = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// A list of individual Task exit codes and how the Batch service should respond to them. + /// A list of Task exit code ranges and how the Batch service should respond to them. + /// How the Batch service should respond if the Task fails to start due to an error. + /// How the Batch service should respond if a file upload error occurs. If the Task exited with an exit code that was specified via exitCodes or exitCodeRanges, and then encountered a file upload error, then the action specified by the exit code takes precedence. + /// How the Batch service should respond if the Task fails with an exit condition not covered by any of the other properties. This value is used if the Task exits with any nonzero exit code not listed in the exitCodes or exitCodeRanges collection, with a pre-processing error if the preProcessingError property is not present, or with a file upload error if the fileUploadError property is not present. 
If you want non-default behavior on exit code 0, you must list it explicitly using the exitCodes or exitCodeRanges collection. + /// Keeps track of any properties unknown to the library. + internal ExitConditions(IList exitCodes, IList exitCodeRanges, ExitOptions preProcessingError, ExitOptions fileUploadError, ExitOptions @default, IDictionary serializedAdditionalRawData) + { + ExitCodes = exitCodes; + ExitCodeRanges = exitCodeRanges; + PreProcessingError = preProcessingError; + FileUploadError = fileUploadError; + Default = @default; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// A list of individual Task exit codes and how the Batch service should respond to them. + public IList ExitCodes { get; } + /// A list of Task exit code ranges and how the Batch service should respond to them. + public IList ExitCodeRanges { get; } + /// How the Batch service should respond if the Task fails to start due to an error. + public ExitOptions PreProcessingError { get; set; } + /// How the Batch service should respond if a file upload error occurs. If the Task exited with an exit code that was specified via exitCodes or exitCodeRanges, and then encountered a file upload error, then the action specified by the exit code takes precedence. + public ExitOptions FileUploadError { get; set; } + /// How the Batch service should respond if the Task fails with an exit condition not covered by any of the other properties. This value is used if the Task exits with any nonzero exit code not listed in the exitCodes or exitCodeRanges collection, with a pre-processing error if the preProcessingError property is not present, or with a file upload error if the fileUploadError property is not present. If you want non-default behavior on exit code 0, you must list it explicitly using the exitCodes or exitCodeRanges collection. + public ExitOptions Default { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ExitOptions.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ExitOptions.Serialization.cs new file mode 100644 index 0000000000000..6c8e87057cb4e --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ExitOptions.Serialization.cs @@ -0,0 +1,157 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class ExitOptions : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ExitOptions)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(JobAction)) + { + writer.WritePropertyName("jobAction"u8); + writer.WriteStringValue(JobAction.Value.ToString()); + } + if (Optional.IsDefined(DependencyAction)) + { + writer.WritePropertyName("dependencyAction"u8); + writer.WriteStringValue(DependencyAction.Value.ToString()); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ExitOptions IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ExitOptions)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeExitOptions(document.RootElement, options); + } + + internal static ExitOptions DeserializeExitOptions(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + BatchJobAction? jobAction = default; + DependencyAction? dependencyAction = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("jobAction"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + jobAction = new BatchJobAction(property.Value.GetString()); + continue; + } + if (property.NameEquals("dependencyAction"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + dependencyAction = new DependencyAction(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new ExitOptions(jobAction, dependencyAction, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ExitOptions)} does not support writing '{options.Format}' format."); + } + } + + ExitOptions IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeExitOptions(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ExitOptions)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static ExitOptions FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeExitOptions(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ExitOptions.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ExitOptions.cs new file mode 100644 index 0000000000000..43cee79c97f75 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ExitOptions.cs @@ -0,0 +1,69 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Specifies how the Batch service responds to a particular exit condition. + public partial class ExitOptions + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public ExitOptions() + { + } + + /// Initializes a new instance of . + /// An action to take on the Job containing the Task, if the Task completes with the given exit condition and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The default is none for exit code 0 and terminate for all other exit conditions. If the Job's onTaskFailed property is noaction, then specifying this property returns an error and the add Task request fails with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + /// An action that the Batch service performs on Tasks that depend on this Task. Possible values are 'satisfy' (allowing dependent tasks to progress) and 'block' (dependent tasks continue to wait). Batch does not yet support cancellation of dependent tasks. + /// Keeps track of any properties unknown to the library. + internal ExitOptions(BatchJobAction? jobAction, DependencyAction? 
dependencyAction, IDictionary serializedAdditionalRawData) + { + JobAction = jobAction; + DependencyAction = dependencyAction; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// An action to take on the Job containing the Task, if the Task completes with the given exit condition and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The default is none for exit code 0 and terminate for all other exit conditions. If the Job's onTaskFailed property is noaction, then specifying this property returns an error and the add Task request fails with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). + public BatchJobAction? JobAction { get; set; } + /// An action that the Batch service performs on Tasks that depend on this Task. Possible values are 'satisfy' (allowing dependent tasks to progress) and 'block' (dependent tasks continue to wait). Batch does not yet support cancellation of dependent tasks. + public DependencyAction? DependencyAction { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/FileProperties.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/FileProperties.Serialization.cs new file mode 100644 index 0000000000000..b0ab715bf8ca1 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/FileProperties.Serialization.cs @@ -0,0 +1,186 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class FileProperties : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(FileProperties)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(CreationTime)) + { + writer.WritePropertyName("creationTime"u8); + writer.WriteStringValue(CreationTime.Value, "O"); + } + writer.WritePropertyName("lastModified"u8); + writer.WriteStringValue(LastModified, "O"); + writer.WritePropertyName("contentLength"u8); + writer.WriteNumberValue(ContentLength); + if (Optional.IsDefined(ContentType)) + { + writer.WritePropertyName("contentType"u8); + writer.WriteStringValue(ContentType); + } + if (Optional.IsDefined(FileMode)) + { + writer.WritePropertyName("fileMode"u8); + writer.WriteStringValue(FileMode); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + FileProperties IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(FileProperties)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeFileProperties(document.RootElement, options); + } + + internal static FileProperties DeserializeFileProperties(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + DateTimeOffset? creationTime = default; + DateTimeOffset lastModified = default; + long contentLength = default; + string contentType = default; + string fileMode = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("creationTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + creationTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("lastModified"u8)) + { + lastModified = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("contentLength"u8)) + { + contentLength = property.Value.GetInt64(); + continue; + } + if (property.NameEquals("contentType"u8)) + { + contentType = property.Value.GetString(); + continue; + } + if (property.NameEquals("fileMode"u8)) + { + fileMode = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new FileProperties( + creationTime, + lastModified, + contentLength, + contentType, + fileMode, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(FileProperties)} does not support writing '{options.Format}' format."); + } + } + + FileProperties IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeFileProperties(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(FileProperties)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static FileProperties FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeFileProperties(document.RootElement); + } + + /// Convert into a . 
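+
+        // [Editorial note, not part of the generated change.] The IJsonModel/IPersistableModel
+        // members above are what let callers round-trip this model through the
+        // System.ClientModel ModelReaderWriter entry points. A minimal sketch; the method
+        // name and variable names are illustrative only:
+        private static FileProperties RoundTripExample(FileProperties model)
+        {
+            // Serialize using the JSON ("J") format negotiated by GetFormatFromOptions.
+            BinaryData data = ModelReaderWriter.Write(model, ModelReaderWriterOptions.Json);
+            // Deserialize back through IPersistableModel<FileProperties>.Create.
+            return ModelReaderWriter.Read<FileProperties>(data, ModelReaderWriterOptions.Json);
+        }
+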
+ internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/FileProperties.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/FileProperties.cs new file mode 100644 index 0000000000000..62a026bccce26 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/FileProperties.cs @@ -0,0 +1,90 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// The properties of a file on a Compute Node. + public partial class FileProperties + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The time at which the file was last modified. + /// The length of the file. + internal FileProperties(DateTimeOffset lastModified, long contentLength) + { + LastModified = lastModified; + ContentLength = contentLength; + } + + /// Initializes a new instance of . + /// The file creation time. The creation time is not returned for files on Linux Compute Nodes. + /// The time at which the file was last modified. + /// The length of the file. + /// The content type of the file. + /// The file mode attribute in octal format. The file mode is returned only for files on Linux Compute Nodes. + /// Keeps track of any properties unknown to the library. + internal FileProperties(DateTimeOffset? creationTime, DateTimeOffset lastModified, long contentLength, string contentType, string fileMode, IDictionary serializedAdditionalRawData) + { + CreationTime = creationTime; + LastModified = lastModified; + ContentLength = contentLength; + ContentType = contentType; + FileMode = fileMode; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal FileProperties() + { + } + + /// The file creation time. The creation time is not returned for files on Linux Compute Nodes. + public DateTimeOffset? CreationTime { get; } + /// The time at which the file was last modified. + public DateTimeOffset LastModified { get; } + /// The length of the file. + public long ContentLength { get; } + /// The content type of the file. + public string ContentType { get; } + /// The file mode attribute in octal format. The file mode is returned only for files on Linux Compute Nodes. 
+ public string FileMode { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/HttpHeader.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/HttpHeader.Serialization.cs new file mode 100644 index 0000000000000..09ded47c08e7b --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/HttpHeader.Serialization.cs @@ -0,0 +1,146 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class HttpHeader : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(HttpHeader)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("name"u8); + writer.WriteStringValue(Name); + if (Optional.IsDefined(Value)) + { + writer.WritePropertyName("value"u8); + writer.WriteStringValue(Value); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + HttpHeader IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(HttpHeader)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeHttpHeader(document.RootElement, options); + } + + internal static HttpHeader DeserializeHttpHeader(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string name = default; + string value = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("name"u8)) + { + name = property.Value.GetString(); + continue; + } + if (property.NameEquals("value"u8)) + { + value = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new HttpHeader(name, value, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(HttpHeader)} does not support writing '{options.Format}' format."); + } + } + + HttpHeader IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeHttpHeader(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(HttpHeader)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static HttpHeader FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeHttpHeader(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/HttpHeader.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/HttpHeader.cs new file mode 100644 index 0000000000000..90e588da517ea --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/HttpHeader.cs @@ -0,0 +1,79 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// An HTTP header name-value pair. + public partial class HttpHeader + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The case-insensitive name of the header to be used while uploading output files. + /// is null. + public HttpHeader(string name) + { + Argument.AssertNotNull(name, nameof(name)); + + Name = name; + } + + /// Initializes a new instance of . + /// The case-insensitive name of the header to be used while uploading output files. + /// The value of the header to be used while uploading output files. + /// Keeps track of any properties unknown to the library. + internal HttpHeader(string name, string value, IDictionary serializedAdditionalRawData) + { + Name = name; + Value = value; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. 
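+
+        // [Editorial note, not part of the generated change.] A minimal usage sketch for this
+        // model. The header name and value below are hypothetical examples for an output-file
+        // upload; nothing in the model mandates them:
+        private static HttpHeader CreateExampleHeader()
+        {
+            // Name is required by the public constructor; Value is optional.
+            return new HttpHeader("x-ms-blob-content-type") { Value = "text/plain" };
+        }
+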
+ internal HttpHeader() + { + } + + /// The case-insensitive name of the header to be used while uploading output files. + public string Name { get; set; } + /// The value of the header to be used while uploading output files. + public string Value { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ImageReference.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ImageReference.Serialization.cs new file mode 100644 index 0000000000000..117fab510ee67 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ImageReference.Serialization.cs @@ -0,0 +1,200 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class ImageReference : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ImageReference)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(Publisher)) + { + writer.WritePropertyName("publisher"u8); + writer.WriteStringValue(Publisher); + } + if (Optional.IsDefined(Offer)) + { + writer.WritePropertyName("offer"u8); + writer.WriteStringValue(Offer); + } + if (Optional.IsDefined(Sku)) + { + writer.WritePropertyName("sku"u8); + writer.WriteStringValue(Sku); + } + if (Optional.IsDefined(Version)) + { + writer.WritePropertyName("version"u8); + writer.WriteStringValue(Version); + } + if (Optional.IsDefined(VirtualMachineImageId)) + { + writer.WritePropertyName("virtualMachineImageId"u8); + writer.WriteStringValue(VirtualMachineImageId); + } + if (options.Format != "W" && Optional.IsDefined(ExactVersion)) + { + writer.WritePropertyName("exactVersion"u8); + writer.WriteStringValue(ExactVersion); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ImageReference IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ImageReference)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeImageReference(document.RootElement, options); + } + + internal static ImageReference DeserializeImageReference(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string publisher = default; + string offer = default; + string sku = default; + string version = default; + string virtualMachineImageId = default; + string exactVersion = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("publisher"u8)) + { + publisher = property.Value.GetString(); + continue; + } + if (property.NameEquals("offer"u8)) + { + offer = property.Value.GetString(); + continue; + } + if (property.NameEquals("sku"u8)) + { + sku = property.Value.GetString(); + continue; + } + if (property.NameEquals("version"u8)) + { + version = property.Value.GetString(); + continue; + } + if (property.NameEquals("virtualMachineImageId"u8)) + { + virtualMachineImageId = property.Value.GetString(); + continue; + } + if (property.NameEquals("exactVersion"u8)) + { + exactVersion = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new ImageReference( + publisher, + offer, + sku, + version, + virtualMachineImageId, + exactVersion, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ImageReference)} does not support writing '{options.Format}' format."); + } + } + + ImageReference IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeImageReference(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ImageReference)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static ImageReference FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeImageReference(document.RootElement); + } + + /// Convert into a . 
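+
+        // [Editorial note, not part of the generated change.] A minimal sketch of populating
+        // the ImageReference model defined in the accompanying ImageReference.cs. The
+        // publisher/offer/SKU values simply reuse the examples quoted in the property docs:
+        private static ImageReference CreateExampleImageReference()
+        {
+            return new ImageReference
+            {
+                Publisher = "Canonical",
+                Offer = "UbuntuServer",
+                Sku = "18.04-LTS",
+                Version = "latest" // 'latest' (or omitting version) selects the newest image
+            };
+        }
+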
+ internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ImageReference.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ImageReference.cs new file mode 100644 index 0000000000000..6c5cb5fc883f4 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ImageReference.cs @@ -0,0 +1,89 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// + /// A reference to an Azure Virtual Machines Marketplace Image or an Azure Compute Gallery Image. + /// To get the list of all Azure Marketplace Image references verified by Azure Batch, see the + /// 'List Supported Images' operation. + /// + public partial class ImageReference + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public ImageReference() + { + } + + /// Initializes a new instance of . + /// The publisher of the Azure Virtual Machines Marketplace Image. For example, Canonical or MicrosoftWindowsServer. + /// The offer type of the Azure Virtual Machines Marketplace Image. For example, UbuntuServer or WindowsServer. + /// The SKU of the Azure Virtual Machines Marketplace Image. For example, 18.04-LTS or 2019-Datacenter. + /// The version of the Azure Virtual Machines Marketplace Image. A value of 'latest' can be specified to select the latest version of an Image. If omitted, the default is 'latest'. + /// The ARM resource identifier of the Azure Compute Gallery Image. Compute Nodes in the Pool will be created using this Image Id. This is of the form /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} or /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} to always default to the latest image version. This property is mutually exclusive with other ImageReference properties. The Azure Compute Gallery Image must have replicas in the same region and must be in the same subscription as the Azure Batch account. If the image version is not specified in the imageId, the latest version will be used. For information about the firewall settings for the Batch Compute Node agent to communicate with the Batch service see https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + /// The specific version of the platform image or marketplace image used to create the node. 
This read-only field differs from 'version' only if the value specified for 'version' when the pool was created was 'latest'. + /// Keeps track of any properties unknown to the library. + internal ImageReference(string publisher, string offer, string sku, string version, string virtualMachineImageId, string exactVersion, IDictionary serializedAdditionalRawData) + { + Publisher = publisher; + Offer = offer; + Sku = sku; + Version = version; + VirtualMachineImageId = virtualMachineImageId; + ExactVersion = exactVersion; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The publisher of the Azure Virtual Machines Marketplace Image. For example, Canonical or MicrosoftWindowsServer. + public string Publisher { get; set; } + /// The offer type of the Azure Virtual Machines Marketplace Image. For example, UbuntuServer or WindowsServer. + public string Offer { get; set; } + /// The SKU of the Azure Virtual Machines Marketplace Image. For example, 18.04-LTS or 2019-Datacenter. + public string Sku { get; set; } + /// The version of the Azure Virtual Machines Marketplace Image. A value of 'latest' can be specified to select the latest version of an Image. If omitted, the default is 'latest'. + public string Version { get; set; } + /// The ARM resource identifier of the Azure Compute Gallery Image. Compute Nodes in the Pool will be created using this Image Id. This is of the form /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} or /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} for always defaulting to the latest image version. This property is mutually exclusive with other ImageReference properties. The Azure Compute Gallery Image must have replicas in the same region and must be in the same subscription as the Azure Batch account. If the image version is not specified in the imageId, the latest version will be used. For information about the firewall settings for the Batch Compute Node agent to communicate with the Batch service see https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + public string VirtualMachineImageId { get; set; } + /// The specific version of the platform image or marketplace image used to create the node. This read-only field differs from 'version' only if the value specified for 'version' when the pool was created was 'latest'. + public string ExactVersion { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ImageVerificationType.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ImageVerificationType.cs new file mode 100644 index 0000000000000..83b53965544c3 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ImageVerificationType.cs @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// ImageVerificationType enums. + public readonly partial struct ImageVerificationType : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public ImageVerificationType(string value) + { + _value = value ?? 
throw new ArgumentNullException(nameof(value)); + } + + private const string VerifiedValue = "verified"; + private const string UnverifiedValue = "unverified"; + + /// The Image is guaranteed to be compatible with the associated Compute Node agent SKU and all Batch features have been confirmed to work as expected. + public static ImageVerificationType Verified { get; } = new ImageVerificationType(VerifiedValue); + /// The associated Compute Node agent SKU should have binary compatibility with the Image, but specific functionality has not been verified. + public static ImageVerificationType Unverified { get; } = new ImageVerificationType(UnverifiedValue); + /// Determines if two values are the same. + public static bool operator ==(ImageVerificationType left, ImageVerificationType right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(ImageVerificationType left, ImageVerificationType right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator ImageVerificationType(string value) => new ImageVerificationType(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is ImageVerificationType other && Equals(other); + /// + public bool Equals(ImageVerificationType other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/InboundEndpoint.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/InboundEndpoint.Serialization.cs new file mode 100644 index 0000000000000..1601adf208238 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/InboundEndpoint.Serialization.cs @@ -0,0 +1,182 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class InboundEndpoint : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InboundEndpoint)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("name"u8); + writer.WriteStringValue(Name); + writer.WritePropertyName("protocol"u8); + writer.WriteStringValue(Protocol.ToString()); + writer.WritePropertyName("publicIPAddress"u8); + writer.WriteStringValue(PublicIpAddress); + writer.WritePropertyName("publicFQDN"u8); + writer.WriteStringValue(PublicFQDN); + writer.WritePropertyName("frontendPort"u8); + writer.WriteNumberValue(FrontendPort); + writer.WritePropertyName("backendPort"u8); + writer.WriteNumberValue(BackendPort); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + InboundEndpoint IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InboundEndpoint)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeInboundEndpoint(document.RootElement, options); + } + + internal static InboundEndpoint DeserializeInboundEndpoint(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string name = default; + InboundEndpointProtocol protocol = default; + string publicIPAddress = default; + string publicFQDN = default; + int frontendPort = default; + int backendPort = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("name"u8)) + { + name = property.Value.GetString(); + continue; + } + if (property.NameEquals("protocol"u8)) + { + protocol = new InboundEndpointProtocol(property.Value.GetString()); + continue; + } + if (property.NameEquals("publicIPAddress"u8)) + { + publicIPAddress = property.Value.GetString(); + continue; + } + if (property.NameEquals("publicFQDN"u8)) + { + publicFQDN = property.Value.GetString(); + continue; + } + if (property.NameEquals("frontendPort"u8)) + { + frontendPort = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("backendPort"u8)) + { + backendPort = property.Value.GetInt32(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new InboundEndpoint( + name, + protocol, + publicIPAddress, + publicFQDN, + frontendPort, + backendPort, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(InboundEndpoint)} does not support writing '{options.Format}' format."); + } + } + + InboundEndpoint IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeInboundEndpoint(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(InboundEndpoint)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static InboundEndpoint FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeInboundEndpoint(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/InboundEndpoint.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/InboundEndpoint.cs new file mode 100644 index 0000000000000..8febab5a4227c --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/InboundEndpoint.cs @@ -0,0 +1,107 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// An inbound endpoint on a Compute Node. + public partial class InboundEndpoint + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The name of the endpoint. + /// The protocol of the endpoint. + /// The public IP address of the Compute Node. + /// The public fully qualified domain name for the Compute Node. + /// The public port number of the endpoint. + /// The backend port number of the endpoint. + /// , or is null. 
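+
+        // [Editorial note, not part of the generated change.] InboundEndpoint is read-only;
+        // instances are populated by the service. A hypothetical helper that renders the NAT
+        // mapping an endpoint describes:
+        private static string DescribeEndpoint(InboundEndpoint endpoint) =>
+            $"{endpoint.Name} ({endpoint.Protocol}): {endpoint.PublicIpAddress}:{endpoint.FrontendPort} -> node port {endpoint.BackendPort}";
+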
+ internal InboundEndpoint(string name, InboundEndpointProtocol protocol, string publicIpAddress, string publicFQDN, int frontendPort, int backendPort) + { + Argument.AssertNotNull(name, nameof(name)); + Argument.AssertNotNull(publicIpAddress, nameof(publicIpAddress)); + Argument.AssertNotNull(publicFQDN, nameof(publicFQDN)); + + Name = name; + Protocol = protocol; + PublicIpAddress = publicIpAddress; + PublicFQDN = publicFQDN; + FrontendPort = frontendPort; + BackendPort = backendPort; + } + + /// Initializes a new instance of . + /// The name of the endpoint. + /// The protocol of the endpoint. + /// The public IP address of the Compute Node. + /// The public fully qualified domain name for the Compute Node. + /// The public port number of the endpoint. + /// The backend port number of the endpoint. + /// Keeps track of any properties unknown to the library. + internal InboundEndpoint(string name, InboundEndpointProtocol protocol, string publicIpAddress, string publicFQDN, int frontendPort, int backendPort, IDictionary serializedAdditionalRawData) + { + Name = name; + Protocol = protocol; + PublicIpAddress = publicIpAddress; + PublicFQDN = publicFQDN; + FrontendPort = frontendPort; + BackendPort = backendPort; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal InboundEndpoint() + { + } + + /// The name of the endpoint. + public string Name { get; } + /// The protocol of the endpoint. + public InboundEndpointProtocol Protocol { get; } + /// The public IP address of the Compute Node. + public string PublicIpAddress { get; } + /// The public fully qualified domain name for the Compute Node. + public string PublicFQDN { get; } + /// The public port number of the endpoint. + public int FrontendPort { get; } + /// The backend port number of the endpoint. + public int BackendPort { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/InboundEndpointProtocol.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/InboundEndpointProtocol.cs new file mode 100644 index 0000000000000..3946b261d5f0e --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/InboundEndpointProtocol.cs @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// InboundEndpointProtocol enums. + public readonly partial struct InboundEndpointProtocol : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public InboundEndpointProtocol(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string TcpValue = "tcp"; + private const string UdpValue = "udp"; + + /// Use TCP for the endpoint. + public static InboundEndpointProtocol Tcp { get; } = new InboundEndpointProtocol(TcpValue); + /// Use UDP for the endpoint. + public static InboundEndpointProtocol Udp { get; } = new InboundEndpointProtocol(UdpValue); + /// Determines if two values are the same. + public static bool operator ==(InboundEndpointProtocol left, InboundEndpointProtocol right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(InboundEndpointProtocol left, InboundEndpointProtocol right) => !left.Equals(right); + /// Converts a string to a . 
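+
+        // [Editorial note, not part of the generated change.] Because of the implicit operator
+        // declared below, this extensible enum accepts raw strings, and Equals compares values
+        // case-insensitively. A minimal sketch:
+        private static bool ExampleIsTcp()
+        {
+            InboundEndpointProtocol protocol = "TCP"; // implicit conversion from string
+            return protocol == InboundEndpointProtocol.Tcp; // true; comparison ignores case
+        }
+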
+ public static implicit operator InboundEndpointProtocol(string value) => new InboundEndpointProtocol(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is InboundEndpointProtocol other && Equals(other); + /// + public bool Equals(InboundEndpointProtocol other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/InboundNatPool.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/InboundNatPool.Serialization.cs new file mode 100644 index 0000000000000..596bce51442d3 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/InboundNatPool.Serialization.cs @@ -0,0 +1,199 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class InboundNatPool : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InboundNatPool)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("name"u8); + writer.WriteStringValue(Name); + writer.WritePropertyName("protocol"u8); + writer.WriteStringValue(Protocol.ToString()); + writer.WritePropertyName("backendPort"u8); + writer.WriteNumberValue(BackendPort); + writer.WritePropertyName("frontendPortRangeStart"u8); + writer.WriteNumberValue(FrontendPortRangeStart); + writer.WritePropertyName("frontendPortRangeEnd"u8); + writer.WriteNumberValue(FrontendPortRangeEnd); + if (Optional.IsCollectionDefined(NetworkSecurityGroupRules)) + { + writer.WritePropertyName("networkSecurityGroupRules"u8); + writer.WriteStartArray(); + foreach (var item in NetworkSecurityGroupRules) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + InboundNatPool IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InboundNatPool)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeInboundNatPool(document.RootElement, options); + } + + internal static InboundNatPool DeserializeInboundNatPool(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string name = default; + InboundEndpointProtocol protocol = default; + int backendPort = default; + int frontendPortRangeStart = default; + int frontendPortRangeEnd = default; + IList networkSecurityGroupRules = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("name"u8)) + { + name = property.Value.GetString(); + continue; + } + if (property.NameEquals("protocol"u8)) + { + protocol = new InboundEndpointProtocol(property.Value.GetString()); + continue; + } + if (property.NameEquals("backendPort"u8)) + { + backendPort = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("frontendPortRangeStart"u8)) + { + frontendPortRangeStart = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("frontendPortRangeEnd"u8)) + { + frontendPortRangeEnd = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("networkSecurityGroupRules"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(NetworkSecurityGroupRule.DeserializeNetworkSecurityGroupRule(item, options)); + } + networkSecurityGroupRules = array; + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new InboundNatPool( + name, + protocol, + backendPort, + frontendPortRangeStart, + frontendPortRangeEnd, + networkSecurityGroupRules ?? new ChangeTrackingList(), + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(InboundNatPool)} does not support writing '{options.Format}' format."); + } + } + + InboundNatPool IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeInboundNatPool(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(InboundNatPool)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. 
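+
+        // [Editorial note, not part of the generated change.] A sketch of the wire shape the
+        // deserializer above expects; the JSON property names mirror the ones written by the
+        // serializer ("name", "protocol", "backendPort", ...). The port values are hypothetical:
+        private static InboundNatPool ParseExample()
+        {
+            using JsonDocument document = JsonDocument.Parse(
+                "{\"name\":\"app\",\"protocol\":\"tcp\",\"backendPort\":8080," +
+                "\"frontendPortRangeStart\":20000,\"frontendPortRangeEnd\":20039}");
+            return DeserializeInboundNatPool(document.RootElement);
+        }
+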
+ internal static InboundNatPool FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeInboundNatPool(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/InboundNatPool.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/InboundNatPool.cs new file mode 100644 index 0000000000000..7dabe3fd9da02 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/InboundNatPool.cs @@ -0,0 +1,107 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// + /// An inbound NAT Pool that can be used to address specific ports on Compute Nodes + /// in a Batch Pool externally. + /// + public partial class InboundNatPool + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The name of the endpoint. The name must be unique within a Batch Pool, can contain letters, numbers, underscores, periods, and hyphens. Names must start with a letter or number, must end with a letter, number, or underscore, and cannot exceed 77 characters. If any invalid values are provided the request fails with HTTP status code 400. + /// The protocol of the endpoint. + /// The port number on the Compute Node. This must be unique within a Batch Pool. Acceptable values are between 1 and 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any reserved values are provided the request fails with HTTP status code 400. + /// The first port number in the range of external ports that will be used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved. All ranges within a Pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or overlapping values are provided the request fails with HTTP status code 400. + /// The last port number in the range of external ports that will be used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved by the Batch service. All ranges within a Pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or overlapping values are provided the request fails with HTTP status code 400. + /// is null. 
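+
+        // [Editorial note, not part of the generated change.] A hypothetical endpoint that
+        // satisfies the documented constraints: the backend port avoids the reserved values
+        // (22, 3389, 29876, 29877) and the frontend range spans at least 40 ports while
+        // staying clear of the reserved 50000-55000 band:
+        private static InboundNatPool CreateExampleNatPool() =>
+            new InboundNatPool("app", InboundEndpointProtocol.Tcp, backendPort: 8080,
+                frontendPortRangeStart: 20000, frontendPortRangeEnd: 20039); // 40 ports inclusive
+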
+ public InboundNatPool(string name, InboundEndpointProtocol protocol, int backendPort, int frontendPortRangeStart, int frontendPortRangeEnd) + { + Argument.AssertNotNull(name, nameof(name)); + + Name = name; + Protocol = protocol; + BackendPort = backendPort; + FrontendPortRangeStart = frontendPortRangeStart; + FrontendPortRangeEnd = frontendPortRangeEnd; + NetworkSecurityGroupRules = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// The name of the endpoint. The name must be unique within a Batch Pool, can contain letters, numbers, underscores, periods, and hyphens. Names must start with a letter or number, must end with a letter, number, or underscore, and cannot exceed 77 characters. If any invalid values are provided the request fails with HTTP status code 400. + /// The protocol of the endpoint. + /// The port number on the Compute Node. This must be unique within a Batch Pool. Acceptable values are between 1 and 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any reserved values are provided the request fails with HTTP status code 400. + /// The first port number in the range of external ports that will be used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved. All ranges within a Pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or overlapping values are provided the request fails with HTTP status code 400. + /// The last port number in the range of external ports that will be used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved by the Batch service. All ranges within a Pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or overlapping values are provided the request fails with HTTP status code 400. + /// A list of network security group rules that will be applied to the endpoint. The maximum number of rules that can be specified across all the endpoints on a Batch Pool is 25. If no network security group rules are specified, a default rule will be created to allow inbound access to the specified backendPort. If the maximum number of network security group rules is exceeded the request fails with HTTP status code 400. + /// Keeps track of any properties unknown to the library. + internal InboundNatPool(string name, InboundEndpointProtocol protocol, int backendPort, int frontendPortRangeStart, int frontendPortRangeEnd, IList networkSecurityGroupRules, IDictionary serializedAdditionalRawData) + { + Name = name; + Protocol = protocol; + BackendPort = backendPort; + FrontendPortRangeStart = frontendPortRangeStart; + FrontendPortRangeEnd = frontendPortRangeEnd; + NetworkSecurityGroupRules = networkSecurityGroupRules; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal InboundNatPool() + { + } + + /// The name of the endpoint. The name must be unique within a Batch Pool, can contain letters, numbers, underscores, periods, and hyphens. Names must start with a letter or number, must end with a letter, number, or underscore, and cannot exceed 77 characters. If any invalid values are provided the request fails with HTTP status code 400. + public string Name { get; set; } + /// The protocol of the endpoint. 
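The Batch service enforces the constraints described in the parameter docs with HTTP 400 responses rather than client-side validation, so a concrete set of valid arguments may help. A sketch, assuming the generated InboundEndpointProtocol exposes a Tcp value for "tcp":

var natPool = new InboundNatPool(
    name: "app-nat",                        // unique within the pool, at most 77 characters
    protocol: InboundEndpointProtocol.Tcp,  // assumed generated value for "tcp"
    backendPort: 8080,                      // avoids the reserved ports 22, 3389, 29876, 29877
    frontendPortRangeStart: 20000,          // stays clear of the reserved 50000-55000 band
    frontendPortRangeEnd: 20099);           // 100 ports, above the 40-port minimum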
+ public InboundEndpointProtocol Protocol { get; set; } + /// The port number on the Compute Node. This must be unique within a Batch Pool. Acceptable values are between 1 and 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any reserved values are provided the request fails with HTTP status code 400. + public int BackendPort { get; set; } + /// The first port number in the range of external ports that will be used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved. All ranges within a Pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or overlapping values are provided the request fails with HTTP status code 400. + public int FrontendPortRangeStart { get; set; } + /// The last port number in the range of external ports that will be used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved by the Batch service. All ranges within a Pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or overlapping values are provided the request fails with HTTP status code 400. + public int FrontendPortRangeEnd { get; set; } + /// A list of network security group rules that will be applied to the endpoint. The maximum number of rules that can be specified across all the endpoints on a Batch Pool is 25. If no network security group rules are specified, a default rule will be created to allow inbound access to the specified backendPort. If the maximum number of network security group rules is exceeded the request fails with HTTP status code 400. + public IList NetworkSecurityGroupRules { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/InstanceViewStatus.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/InstanceViewStatus.Serialization.cs new file mode 100644 index 0000000000000..1d458a88f409e --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/InstanceViewStatus.Serialization.cs @@ -0,0 +1,196 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class InstanceViewStatus : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InstanceViewStatus)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(Code)) + { + writer.WritePropertyName("code"u8); + writer.WriteStringValue(Code); + } + if (Optional.IsDefined(DisplayStatus)) + { + writer.WritePropertyName("displayStatus"u8); + writer.WriteStringValue(DisplayStatus); + } + if (Optional.IsDefined(Level)) + { + writer.WritePropertyName("level"u8); + writer.WriteStringValue(Level.Value.ToString()); + } + if (Optional.IsDefined(Message)) + { + writer.WritePropertyName("message"u8); + writer.WriteStringValue(Message); + } + if (Optional.IsDefined(Time)) + { + writer.WritePropertyName("time"u8); + writer.WriteStringValue(Time.Value, "O"); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + InstanceViewStatus IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(InstanceViewStatus)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeInstanceViewStatus(document.RootElement, options); + } + + internal static InstanceViewStatus DeserializeInstanceViewStatus(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string code = default; + string displayStatus = default; + StatusLevelTypes? level = default; + string message = default; + DateTimeOffset? time = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("code"u8)) + { + code = property.Value.GetString(); + continue; + } + if (property.NameEquals("displayStatus"u8)) + { + displayStatus = property.Value.GetString(); + continue; + } + if (property.NameEquals("level"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + level = new StatusLevelTypes(property.Value.GetString()); + continue; + } + if (property.NameEquals("message"u8)) + { + message = property.Value.GetString(); + continue; + } + if (property.NameEquals("time"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + time = property.Value.GetDateTimeOffset("O"); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new InstanceViewStatus( + code, + displayStatus, + level, + message, + time, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
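Since InstanceViewStatus has only internal constructors and get-only properties, instances normally come back from deserialization. A sketch of reading one from a payload; the "Info" level and the timestamp are illustrative values, with the timestamp in the "O" round-trip format the deserializer expects:

using System;
using System.ClientModel.Primitives;
using Azure.Compute.Batch;

string json = "{\"code\":\"ProvisioningState/succeeded\","
            + "\"displayStatus\":\"Provisioning succeeded\","
            + "\"level\":\"Info\","
            + "\"time\":\"2024-06-01T12:00:00.0000000Z\"}";

InstanceViewStatus status = ModelReaderWriter.Read<InstanceViewStatus>(BinaryData.FromString(json));
// status.Level wraps "Info" in the extensible StatusLevelTypes struct;
// status.Time is a DateTimeOffset parsed with the "O" format.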
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(InstanceViewStatus)} does not support writing '{options.Format}' format."); + } + } + + InstanceViewStatus IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeInstanceViewStatus(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(InstanceViewStatus)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static InstanceViewStatus FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeInstanceViewStatus(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/InstanceViewStatus.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/InstanceViewStatus.cs new file mode 100644 index 0000000000000..32598b7a71f3e --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/InstanceViewStatus.cs @@ -0,0 +1,81 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// The instance view status. + public partial class InstanceViewStatus + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + internal InstanceViewStatus() + { + } + + /// Initializes a new instance of . + /// The status code. + /// The localized label for the status. + /// Level code. + /// The detailed status message. + /// The time of the status. + /// Keeps track of any properties unknown to the library. + internal InstanceViewStatus(string code, string displayStatus, StatusLevelTypes? level, string message, DateTimeOffset? time, IDictionary serializedAdditionalRawData) + { + Code = code; + DisplayStatus = displayStatus; + Level = level; + Message = message; + Time = time; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The status code. + public string Code { get; } + /// The localized label for the status. 
+ public string DisplayStatus { get; } + /// Level code. + public StatusLevelTypes? Level { get; } + /// The detailed status message. + public string Message { get; } + /// The time of the status. + public DateTimeOffset? Time { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/Internal/Argument.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/Internal/Argument.cs new file mode 100644 index 0000000000000..2d342a29deea4 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/Internal/Argument.cs @@ -0,0 +1,129 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + internal static class Argument + { + public static void AssertNotNull(T value, string name) + { + if (value is null) + { + throw new ArgumentNullException(name); + } + } + + public static void AssertNotNull(T? value, string name) + where T : struct + { + if (!value.HasValue) + { + throw new ArgumentNullException(name); + } + } + + public static void AssertNotNullOrEmpty(IEnumerable value, string name) + { + if (value is null) + { + throw new ArgumentNullException(name); + } + if (value is ICollection collectionOfT && collectionOfT.Count == 0) + { + throw new ArgumentException("Value cannot be an empty collection.", name); + } + if (value is ICollection collection && collection.Count == 0) + { + throw new ArgumentException("Value cannot be an empty collection.", name); + } + using IEnumerator e = value.GetEnumerator(); + if (!e.MoveNext()) + { + throw new ArgumentException("Value cannot be an empty collection.", name); + } + } + + public static void AssertNotNullOrEmpty(string value, string name) + { + if (value is null) + { + throw new ArgumentNullException(name); + } + if (value.Length == 0) + { + throw new ArgumentException("Value cannot be an empty string.", name); + } + } + + public static void AssertNotNullOrWhiteSpace(string value, string name) + { + if (value is null) + { + throw new ArgumentNullException(name); + } + if (string.IsNullOrWhiteSpace(value)) + { + throw new ArgumentException("Value cannot be empty or contain only white-space characters.", name); + } + } + + public static void AssertNotDefault(ref T value, string name) + where T : struct, IEquatable + { + if (value.Equals(default)) + { + throw new ArgumentException("Value cannot be empty.", name); + } + } + + public static void AssertInRange(T value, T minimum, T maximum, string name) + where T : notnull, IComparable + { + if (minimum.CompareTo(value) > 0) + { + throw new ArgumentOutOfRangeException(name, "Value is less than the minimum allowed."); + } + if (maximum.CompareTo(value) < 0) + { + throw new ArgumentOutOfRangeException(name, "Value is greater than the maximum allowed."); + } + } + + public static void AssertEnumDefined(Type enumType, object value, string name) + { + if (!Enum.IsDefined(enumType, value)) + { + throw new ArgumentException($"Value not defined for {enumType.FullName}.", name); + } + } + + public static T CheckNotNull(T value, string name) + where T : class + { + AssertNotNull(value, name); + return value; + } + + public static string CheckNotNullOrEmpty(string value, string name) + { + AssertNotNullOrEmpty(value, name); + return value; + } + + public static void AssertNull(T value, string name, string message = null) + { + if (value != null) + { + throw new ArgumentException(message ?? 
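The generated Argument helper centralizes the guard clauses the model constructors rely on (InboundNatPool and MetadataItem both call AssertNotNull). A sketch of typical use inside the library; PoolEndpointConfig is a made-up type for illustration:

internal class PoolEndpointConfig
{
    public string Name { get; }
    public int Port { get; }

    public PoolEndpointConfig(string name, int port)
    {
        // Throws ArgumentNullException for null and ArgumentException for "".
        Argument.AssertNotNullOrEmpty(name, nameof(name));
        // Throws ArgumentOutOfRangeException outside [1, 65535].
        Argument.AssertInRange(port, 1, 65535, nameof(port));
        Name = name;
        Port = port;
    }
}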
"Value must be null.", name); + } + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/Internal/ChangeTrackingDictionary.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/Internal/ChangeTrackingDictionary.cs new file mode 100644 index 0000000000000..79f135e1c97fc --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/Internal/ChangeTrackingDictionary.cs @@ -0,0 +1,167 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + internal class ChangeTrackingDictionary : IDictionary, IReadOnlyDictionary where TKey : notnull + { + private IDictionary _innerDictionary; + + public ChangeTrackingDictionary() + { + } + + public ChangeTrackingDictionary(IDictionary dictionary) + { + if (dictionary == null) + { + return; + } + _innerDictionary = new Dictionary(dictionary); + } + + public ChangeTrackingDictionary(IReadOnlyDictionary dictionary) + { + if (dictionary == null) + { + return; + } + _innerDictionary = new Dictionary(); + foreach (var pair in dictionary) + { + _innerDictionary.Add(pair); + } + } + + public bool IsUndefined => _innerDictionary == null; + + public int Count => IsUndefined ? 0 : EnsureDictionary().Count; + + public bool IsReadOnly => IsUndefined ? false : EnsureDictionary().IsReadOnly; + + public ICollection Keys => IsUndefined ? Array.Empty() : EnsureDictionary().Keys; + + public ICollection Values => IsUndefined ? Array.Empty() : EnsureDictionary().Values; + + public TValue this[TKey key] + { + get + { + if (IsUndefined) + { + throw new KeyNotFoundException(nameof(key)); + } + return EnsureDictionary()[key]; + } + set + { + EnsureDictionary()[key] = value; + } + } + + IEnumerable IReadOnlyDictionary.Keys => Keys; + + IEnumerable IReadOnlyDictionary.Values => Values; + + public IEnumerator> GetEnumerator() + { + if (IsUndefined) + { + IEnumerator> enumerateEmpty() + { + yield break; + } + return enumerateEmpty(); + } + return EnsureDictionary().GetEnumerator(); + } + + IEnumerator IEnumerable.GetEnumerator() + { + return GetEnumerator(); + } + + public void Add(KeyValuePair item) + { + EnsureDictionary().Add(item); + } + + public void Clear() + { + EnsureDictionary().Clear(); + } + + public bool Contains(KeyValuePair item) + { + if (IsUndefined) + { + return false; + } + return EnsureDictionary().Contains(item); + } + + public void CopyTo(KeyValuePair[] array, int index) + { + if (IsUndefined) + { + return; + } + EnsureDictionary().CopyTo(array, index); + } + + public bool Remove(KeyValuePair item) + { + if (IsUndefined) + { + return false; + } + return EnsureDictionary().Remove(item); + } + + public void Add(TKey key, TValue value) + { + EnsureDictionary().Add(key, value); + } + + public bool ContainsKey(TKey key) + { + if (IsUndefined) + { + return false; + } + return EnsureDictionary().ContainsKey(key); + } + + public bool Remove(TKey key) + { + if (IsUndefined) + { + return false; + } + return EnsureDictionary().Remove(key); + } + + public bool TryGetValue(TKey key, out TValue value) + { + if (IsUndefined) + { + value = default; + return false; + } + return EnsureDictionary().TryGetValue(key, out value); + } + + public IDictionary EnsureDictionary() + { + return _innerDictionary ??= new Dictionary(); + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/Internal/ChangeTrackingList.cs 
b/sdk/batch/Azure.Compute.Batch/src/Generated/Internal/ChangeTrackingList.cs new file mode 100644 index 0000000000000..c730be09d10fd --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/Internal/ChangeTrackingList.cs @@ -0,0 +1,153 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections; +using System.Collections.Generic; +using System.Linq; + +namespace Azure.Compute.Batch +{ + internal class ChangeTrackingList : IList, IReadOnlyList + { + private IList _innerList; + + public ChangeTrackingList() + { + } + + public ChangeTrackingList(IList innerList) + { + if (innerList != null) + { + _innerList = innerList; + } + } + + public ChangeTrackingList(IReadOnlyList innerList) + { + if (innerList != null) + { + _innerList = innerList.ToList(); + } + } + + public bool IsUndefined => _innerList == null; + + public int Count => IsUndefined ? 0 : EnsureList().Count; + + public bool IsReadOnly => IsUndefined ? false : EnsureList().IsReadOnly; + + public T this[int index] + { + get + { + if (IsUndefined) + { + throw new ArgumentOutOfRangeException(nameof(index)); + } + return EnsureList()[index]; + } + set + { + if (IsUndefined) + { + throw new ArgumentOutOfRangeException(nameof(index)); + } + EnsureList()[index] = value; + } + } + + public void Reset() + { + _innerList = null; + } + + public IEnumerator GetEnumerator() + { + if (IsUndefined) + { + IEnumerator enumerateEmpty() + { + yield break; + } + return enumerateEmpty(); + } + return EnsureList().GetEnumerator(); + } + + IEnumerator IEnumerable.GetEnumerator() + { + return GetEnumerator(); + } + + public void Add(T item) + { + EnsureList().Add(item); + } + + public void Clear() + { + EnsureList().Clear(); + } + + public bool Contains(T item) + { + if (IsUndefined) + { + return false; + } + return EnsureList().Contains(item); + } + + public void CopyTo(T[] array, int arrayIndex) + { + if (IsUndefined) + { + return; + } + EnsureList().CopyTo(array, arrayIndex); + } + + public bool Remove(T item) + { + if (IsUndefined) + { + return false; + } + return EnsureList().Remove(item); + } + + public int IndexOf(T item) + { + if (IsUndefined) + { + return -1; + } + return EnsureList().IndexOf(item); + } + + public void Insert(int index, T item) + { + EnsureList().Insert(index, item); + } + + public void RemoveAt(int index) + { + if (IsUndefined) + { + throw new ArgumentOutOfRangeException(nameof(index)); + } + EnsureList().RemoveAt(index); + } + + public IList EnsureList() + { + return _innerList ??= new List(); + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/Internal/ModelSerializationExtensions.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/Internal/ModelSerializationExtensions.cs new file mode 100644 index 0000000000000..e8ed693cc305b --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/Internal/ModelSerializationExtensions.cs @@ -0,0 +1,398 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
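ChangeTrackingList follows the same lazy pattern and adds Reset() to return to the undefined state; Optional.IsCollectionDefined (further down in this diff) keys off exactly this. Sketch:

using System;

var list = new ChangeTrackingList<int>();
Console.WriteLine(list.IsUndefined);  // True: reads as empty, Count == 0, IndexOf returns -1
list.Add(7);                          // EnsureList() allocates on first mutation
Console.WriteLine(list.IsUndefined);  // False: the collection is now "defined" and will serialize
list.Reset();                         // drop the inner list entirely
Console.WriteLine(list.IsUndefined);  // True again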
+ +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Diagnostics; +using System.Globalization; +using System.Text.Json; +using System.Xml; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + internal static class ModelSerializationExtensions + { + internal static readonly ModelReaderWriterOptions WireOptions = new ModelReaderWriterOptions("W"); + + public static object GetObject(this JsonElement element) + { + switch (element.ValueKind) + { + case JsonValueKind.String: + return element.GetString(); + case JsonValueKind.Number: + if (element.TryGetInt32(out int intValue)) + { + return intValue; + } + if (element.TryGetInt64(out long longValue)) + { + return longValue; + } + return element.GetDouble(); + case JsonValueKind.True: + return true; + case JsonValueKind.False: + return false; + case JsonValueKind.Undefined: + case JsonValueKind.Null: + return null; + case JsonValueKind.Object: + var dictionary = new Dictionary(); + foreach (var jsonProperty in element.EnumerateObject()) + { + dictionary.Add(jsonProperty.Name, jsonProperty.Value.GetObject()); + } + return dictionary; + case JsonValueKind.Array: + var list = new List(); + foreach (var item in element.EnumerateArray()) + { + list.Add(item.GetObject()); + } + return list.ToArray(); + default: + throw new NotSupportedException($"Not supported value kind {element.ValueKind}"); + } + } + + public static byte[] GetBytesFromBase64(this JsonElement element, string format) + { + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + + return format switch + { + "U" => TypeFormatters.FromBase64UrlString(element.GetRequiredString()), + "D" => element.GetBytesFromBase64(), + _ => throw new ArgumentException($"Format is not supported: '{format}'", nameof(format)) + }; + } + + public static DateTimeOffset GetDateTimeOffset(this JsonElement element, string format) => format switch + { + "U" when element.ValueKind == JsonValueKind.Number => DateTimeOffset.FromUnixTimeSeconds(element.GetInt64()), + _ => TypeFormatters.ParseDateTimeOffset(element.GetString(), format) + }; + + public static TimeSpan GetTimeSpan(this JsonElement element, string format) => TypeFormatters.ParseTimeSpan(element.GetString(), format); + + public static char GetChar(this JsonElement element) + { + if (element.ValueKind == JsonValueKind.String) + { + var text = element.GetString(); + if (text == null || text.Length != 1) + { + throw new NotSupportedException($"Cannot convert \"{text}\" to a char"); + } + return text[0]; + } + else + { + throw new NotSupportedException($"Cannot convert {element.ValueKind} to a char"); + } + } + + [Conditional("DEBUG")] + public static void ThrowNonNullablePropertyIsNull(this JsonProperty property) + { + throw new JsonException($"A property '{property.Name}' defined as non-nullable but received as null from the service. 
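GetObject above maps arbitrary JSON into plain CLR values (int, long, or double for numbers, bool, string, object[] for arrays, nested Dictionary<string, object> for objects), which backs the additional-raw-data passthrough. A sketch of the mapping:

using System.Text.Json;

using JsonDocument doc = JsonDocument.Parse(
    "{\"retries\":3,\"ratio\":0.5,\"tags\":[\"a\",\"b\"],\"nested\":{\"on\":true}}");

object value = doc.RootElement.GetObject();
// value is a Dictionary<string, object>:
//   "retries" -> (int)3            (TryGetInt32 succeeds first)
//   "ratio"   -> (double)0.5
//   "tags"    -> object[] { "a", "b" }
//   "nested"  -> Dictionary<string, object> { "on" -> true }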
This exception only happens in DEBUG builds of the library and would be ignored in the release build"); + } + + public static string GetRequiredString(this JsonElement element) + { + var value = element.GetString(); + if (value == null) + { + throw new InvalidOperationException($"The requested operation requires an element of type 'String', but the target element has type '{element.ValueKind}'."); + } + return value; + } + + public static void WriteStringValue(this Utf8JsonWriter writer, DateTimeOffset value, string format) + { + writer.WriteStringValue(TypeFormatters.ToString(value, format)); + } + + public static void WriteStringValue(this Utf8JsonWriter writer, DateTime value, string format) + { + writer.WriteStringValue(TypeFormatters.ToString(value, format)); + } + + public static void WriteStringValue(this Utf8JsonWriter writer, TimeSpan value, string format) + { + writer.WriteStringValue(TypeFormatters.ToString(value, format)); + } + + public static void WriteStringValue(this Utf8JsonWriter writer, char value) + { + writer.WriteStringValue(value.ToString(CultureInfo.InvariantCulture)); + } + + public static void WriteBase64StringValue(this Utf8JsonWriter writer, byte[] value, string format) + { + if (value == null) + { + writer.WriteNullValue(); + return; + } + switch (format) + { + case "U": + writer.WriteStringValue(TypeFormatters.ToBase64UrlString(value)); + break; + case "D": + writer.WriteBase64StringValue(value); + break; + default: + throw new ArgumentException($"Format is not supported: '{format}'", nameof(format)); + } + } + + public static void WriteNumberValue(this Utf8JsonWriter writer, DateTimeOffset value, string format) + { + if (format != "U") + { + throw new ArgumentOutOfRangeException(nameof(format), "Only 'U' format is supported when writing a DateTimeOffset as a Number."); + } + writer.WriteNumberValue(value.ToUnixTimeSeconds()); + } + + public static void WriteObjectValue(this Utf8JsonWriter writer, T value, ModelReaderWriterOptions options = null) + { + switch (value) + { + case null: + writer.WriteNullValue(); + break; + case IJsonModel jsonModel: + jsonModel.Write(writer, options ?? 
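The Utf8JsonWriter extensions above thread the SDK's single-letter format specifiers through to TypeFormatters. A sketch of the date and duration overloads:

using System;
using System.IO;
using System.Text.Json;

var buffer = new MemoryStream();
using (var writer = new Utf8JsonWriter(buffer))
{
    writer.WriteStartObject();
    writer.WritePropertyName("expires");
    writer.WriteStringValue(new DateTimeOffset(2024, 6, 1, 0, 0, 0, TimeSpan.Zero), "O");
    writer.WritePropertyName("timeout");
    writer.WriteStringValue(TimeSpan.FromMinutes(5), "P"); // ISO 8601 duration: "PT5M"
    writer.WriteEndObject();
}
// buffer now holds {"expires":"2024-06-01T00:00:00.0000000Z","timeout":"PT5M"}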
WireOptions); + break; + case IUtf8JsonSerializable serializable: + serializable.Write(writer); + break; + case byte[] bytes: + writer.WriteBase64StringValue(bytes); + break; + case BinaryData bytes0: + writer.WriteBase64StringValue(bytes0); + break; + case JsonElement json: + json.WriteTo(writer); + break; + case int i: + writer.WriteNumberValue(i); + break; + case decimal d: + writer.WriteNumberValue(d); + break; + case double d0: + if (double.IsNaN(d0)) + { + writer.WriteStringValue("NaN"); + } + else + { + writer.WriteNumberValue(d0); + } + break; + case float f: + writer.WriteNumberValue(f); + break; + case long l: + writer.WriteNumberValue(l); + break; + case string s: + writer.WriteStringValue(s); + break; + case bool b: + writer.WriteBooleanValue(b); + break; + case Guid g: + writer.WriteStringValue(g); + break; + case DateTimeOffset dateTimeOffset: + writer.WriteStringValue(dateTimeOffset, "O"); + break; + case DateTime dateTime: + writer.WriteStringValue(dateTime, "O"); + break; + case IEnumerable> enumerable: + writer.WriteStartObject(); + foreach (var pair in enumerable) + { + writer.WritePropertyName(pair.Key); + writer.WriteObjectValue(pair.Value, options); + } + writer.WriteEndObject(); + break; + case IEnumerable objectEnumerable: + writer.WriteStartArray(); + foreach (var item in objectEnumerable) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + break; + case TimeSpan timeSpan: + writer.WriteStringValue(timeSpan, "P"); + break; + default: + throw new NotSupportedException($"Not supported type {value.GetType()}"); + } + } + + public static void WriteObjectValue(this Utf8JsonWriter writer, object value, ModelReaderWriterOptions options = null) + { + writer.WriteObjectValue(value, options); + } + + internal static class TypeFormatters + { + private const string RoundtripZFormat = "yyyy-MM-ddTHH:mm:ss.fffffffZ"; + public const string DefaultNumberFormat = "G"; + + public static string ToString(bool value) => value ? "true" : "false"; + + public static string ToString(DateTime value, string format) => value.Kind switch + { + DateTimeKind.Utc => ToString((DateTimeOffset)value, format), + _ => throw new NotSupportedException($"DateTime {value} has a Kind of {value.Kind}. Azure SDK requires it to be UTC. 
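WriteObjectValue dispatches on the runtime type of the value, so heterogeneous object graphs serialize without reflection; dictionaries and sequences land in the IEnumerable branches above and recurse per element. Sketch:

using System;
using System.Collections.Generic;
using System.IO;
using System.Text.Json;

var buffer = new MemoryStream();
using (var writer = new Utf8JsonWriter(buffer))
{
    writer.WriteObjectValue(new Dictionary<string, object>
    {
        ["count"] = 3,                                                     // int case
        ["when"] = new DateTimeOffset(2024, 6, 1, 0, 0, 0, TimeSpan.Zero), // written with "O"
        ["tags"] = new object[] { "a", "b" },                              // sequence case
    });
}
// buffer now holds {"count":3,"when":"2024-06-01T00:00:00.0000000Z","tags":["a","b"]}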
You can call DateTime.SpecifyKind to change Kind property value to DateTimeKind.Utc.") + }; + + public static string ToString(DateTimeOffset value, string format) => format switch + { + "D" => value.ToString("yyyy-MM-dd", CultureInfo.InvariantCulture), + "U" => value.ToUnixTimeSeconds().ToString(CultureInfo.InvariantCulture), + "O" => value.ToUniversalTime().ToString(RoundtripZFormat, CultureInfo.InvariantCulture), + "o" => value.ToUniversalTime().ToString(RoundtripZFormat, CultureInfo.InvariantCulture), + "R" => value.ToString("r", CultureInfo.InvariantCulture), + _ => value.ToString(format, CultureInfo.InvariantCulture) + }; + + public static string ToString(TimeSpan value, string format) => format switch + { + "P" => XmlConvert.ToString(value), + _ => value.ToString(format, CultureInfo.InvariantCulture) + }; + + public static string ToString(byte[] value, string format) => format switch + { + "U" => ToBase64UrlString(value), + "D" => Convert.ToBase64String(value), + _ => throw new ArgumentException($"Format is not supported: '{format}'", nameof(format)) + }; + + public static string ToBase64UrlString(byte[] value) + { + int numWholeOrPartialInputBlocks = checked(value.Length + 2) / 3; + int size = checked(numWholeOrPartialInputBlocks * 4); + char[] output = new char[size]; + + int numBase64Chars = Convert.ToBase64CharArray(value, 0, value.Length, output, 0); + + int i = 0; + for (; i < numBase64Chars; i++) + { + char ch = output[i]; + if (ch == '+') + { + output[i] = '-'; + } + else + { + if (ch == '/') + { + output[i] = '_'; + } + else + { + if (ch == '=') + { + break; + } + } + } + } + + return new string(output, 0, i); + } + + public static byte[] FromBase64UrlString(string value) + { + int paddingCharsToAdd = (value.Length % 4) switch + { + 0 => 0, + 2 => 2, + 3 => 1, + _ => throw new InvalidOperationException("Malformed input") + }; + char[] output = new char[(value.Length + paddingCharsToAdd)]; + int i = 0; + for (; i < value.Length; i++) + { + char ch = value[i]; + if (ch == '-') + { + output[i] = '+'; + } + else + { + if (ch == '_') + { + output[i] = '/'; + } + else + { + output[i] = ch; + } + } + } + + for (; i < output.Length; i++) + { + output[i] = '='; + } + + return Convert.FromBase64CharArray(output, 0, output.Length); + } + + public static DateTimeOffset ParseDateTimeOffset(string value, string format) => format switch + { + "U" => DateTimeOffset.FromUnixTimeSeconds(long.Parse(value, CultureInfo.InvariantCulture)), + _ => DateTimeOffset.Parse(value, CultureInfo.InvariantCulture, DateTimeStyles.AssumeUniversal) + }; + + public static TimeSpan ParseTimeSpan(string value, string format) => format switch + { + "P" => XmlConvert.ToTimeSpan(value), + _ => TimeSpan.ParseExact(value, format, CultureInfo.InvariantCulture) + }; + + public static string ConvertToString(object value, string format = null) => value switch + { + null => "null", + string s => s, + bool b => ToString(b), + int or float or double or long or decimal => ((IFormattable)value).ToString(DefaultNumberFormat, CultureInfo.InvariantCulture), + byte[] b0 when format != null => ToString(b0, format), + IEnumerable s0 => string.Join(",", s0), + DateTimeOffset dateTime when format != null => ToString(dateTime, format), + TimeSpan timeSpan when format != null => ToString(timeSpan, format), + TimeSpan timeSpan0 => XmlConvert.ToString(timeSpan0), + Guid guid => guid.ToString(), + BinaryData binaryData => ConvertToString(binaryData.ToArray(), format), + _ => value.ToString() + }; + } + } +} diff --git 
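TypeFormatters pins every conversion to CultureInfo.InvariantCulture so the wire format is locale-independent; it is nested inside ModelSerializationExtensions, so in-assembly callers qualify it. A few illustrative conversions (the Unix value is 2024-06-01T12:00:00Z in seconds):

using System;

var when = new DateTimeOffset(2024, 6, 1, 12, 0, 0, TimeSpan.Zero);
string iso  = ModelSerializationExtensions.TypeFormatters.ToString(when, "O"); // "2024-06-01T12:00:00.0000000Z"
string unix = ModelSerializationExtensions.TypeFormatters.ToString(when, "U"); // "1717243200"
string date = ModelSerializationExtensions.TypeFormatters.ToString(when, "D"); // "2024-06-01"

// Base64Url swaps '+' for '-', '/' for '_', and trims '=' padding; FromBase64UrlString reverses it.
string url  = ModelSerializationExtensions.TypeFormatters.ToBase64UrlString(new byte[] { 0xFB, 0xEF, 0xFF }); // "--__"
byte[] back = ModelSerializationExtensions.TypeFormatters.FromBase64UrlString(url); // { 0xFB, 0xEF, 0xFF }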
a/sdk/batch/Azure.Compute.Batch/src/Generated/Internal/Optional.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/Internal/Optional.cs new file mode 100644 index 0000000000000..fd4de74cc14cb --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/Internal/Optional.cs @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System.Collections.Generic; +using System.Text.Json; + +namespace Azure.Compute.Batch +{ + internal static class Optional + { + public static bool IsCollectionDefined(IEnumerable collection) + { + return !(collection is ChangeTrackingList changeTrackingList && changeTrackingList.IsUndefined); + } + + public static bool IsCollectionDefined(IDictionary collection) + { + return !(collection is ChangeTrackingDictionary changeTrackingDictionary && changeTrackingDictionary.IsUndefined); + } + + public static bool IsCollectionDefined(IReadOnlyDictionary collection) + { + return !(collection is ChangeTrackingDictionary changeTrackingDictionary && changeTrackingDictionary.IsUndefined); + } + + public static bool IsDefined(T? value) + where T : struct + { + return value.HasValue; + } + + public static bool IsDefined(object value) + { + return value != null; + } + + public static bool IsDefined(JsonElement value) + { + return value.ValueKind != JsonValueKind.Undefined; + } + + public static bool IsDefined(string value) + { + return value != null; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/Internal/Utf8JsonRequestContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/Internal/Utf8JsonRequestContent.cs new file mode 100644 index 0000000000000..83b2e279a9dce --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/Internal/Utf8JsonRequestContent.cs @@ -0,0 +1,55 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System.IO; +using System.Text.Json; +using System.Threading; +using System.Threading.Tasks; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + internal class Utf8JsonRequestContent : RequestContent + { + private readonly MemoryStream _stream; + private readonly RequestContent _content; + + public Utf8JsonRequestContent() + { + _stream = new MemoryStream(); + _content = Create(_stream); + JsonWriter = new Utf8JsonWriter(_stream); + } + + public Utf8JsonWriter JsonWriter { get; } + + public override async Task WriteToAsync(Stream stream, CancellationToken cancellationToken = default) + { + await JsonWriter.FlushAsync().ConfigureAwait(false); + await _content.WriteToAsync(stream, cancellationToken).ConfigureAwait(false); + } + + public override void WriteTo(Stream stream, CancellationToken cancellationToken = default) + { + JsonWriter.Flush(); + _content.WriteTo(stream, cancellationToken); + } + + public override bool TryComputeLength(out long length) + { + length = JsonWriter.BytesCommitted + JsonWriter.BytesPending; + return true; + } + + public override void Dispose() + { + JsonWriter.Dispose(); + _content.Dispose(); + _stream.Dispose(); + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/IpAddressProvisioningType.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/IpAddressProvisioningType.cs new file mode 100644 index 0000000000000..812e1dc8e828d --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/IpAddressProvisioningType.cs @@ -0,0 +1,54 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. 
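Utf8JsonRequestContent is what ToRequestContent hands to the HTTP pipeline: the Utf8JsonWriter targets an in-memory stream, and TryComputeLength adds BytesCommitted and BytesPending so Content-Length is known even before the writer flushes. Sketch:

var content = new Utf8JsonRequestContent();
content.JsonWriter.WriteStartObject();
content.JsonWriter.WriteString("name", "sample");
content.JsonWriter.WriteEndObject();

content.TryComputeLength(out long length); // 17, the UTF-8 length of {"name":"sample"}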
+// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// IPAddressProvisioningType enums. + public readonly partial struct IpAddressProvisioningType : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public IpAddressProvisioningType(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string BatchManagedValue = "batchmanaged"; + private const string UserManagedValue = "usermanaged"; + private const string NoPublicIpAddressesValue = "nopublicipaddresses"; + + /// A public IP will be created and managed by Batch. There may be multiple public IPs depending on the size of the Pool. + public static IpAddressProvisioningType BatchManaged { get; } = new IpAddressProvisioningType(BatchManagedValue); + /// Public IPs are provided by the user and will be used to provision the Compute Nodes. + public static IpAddressProvisioningType UserManaged { get; } = new IpAddressProvisioningType(UserManagedValue); + /// No public IP Address will be created. + public static IpAddressProvisioningType NoPublicIpAddresses { get; } = new IpAddressProvisioningType(NoPublicIpAddressesValue); + /// Determines if two values are the same. + public static bool operator ==(IpAddressProvisioningType left, IpAddressProvisioningType right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(IpAddressProvisioningType left, IpAddressProvisioningType right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator IpAddressProvisioningType(string value) => new IpAddressProvisioningType(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is IpAddressProvisioningType other && Equals(other); + /// + public bool Equals(IpAddressProvisioningType other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/LinuxUserConfiguration.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/LinuxUserConfiguration.Serialization.cs new file mode 100644 index 0000000000000..bb92943b03125 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/LinuxUserConfiguration.Serialization.cs @@ -0,0 +1,168 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class LinuxUserConfiguration : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
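IpAddressProvisioningType follows the SDK's extensible-enum pattern: unknown service values flow through untouched instead of throwing, and equality ignores case. Sketch:

using System;

IpAddressProvisioningType managed  = IpAddressProvisioningType.BatchManaged;
IpAddressProvisioningType fromWire = "BATCHMANAGED";      // implicit conversion from string
Console.WriteLine(managed == fromWire);                   // True: comparison is case-insensitive

IpAddressProvisioningType future = "somefuturevalue";     // values unknown to this SDK version still round-trip
Console.WriteLine(future);                                // "somefuturevalue"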
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(LinuxUserConfiguration)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(Uid)) + { + writer.WritePropertyName("uid"u8); + writer.WriteNumberValue(Uid.Value); + } + if (Optional.IsDefined(Gid)) + { + writer.WritePropertyName("gid"u8); + writer.WriteNumberValue(Gid.Value); + } + if (Optional.IsDefined(SshPrivateKey)) + { + writer.WritePropertyName("sshPrivateKey"u8); + writer.WriteStringValue(SshPrivateKey); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + LinuxUserConfiguration IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(LinuxUserConfiguration)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeLinuxUserConfiguration(document.RootElement, options); + } + + internal static LinuxUserConfiguration DeserializeLinuxUserConfiguration(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + int? uid = default; + int? gid = default; + string sshPrivateKey = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("uid"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + uid = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("gid"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + gid = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("sshPrivateKey"u8)) + { + sshPrivateKey = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new LinuxUserConfiguration(uid, gid, sshPrivateKey, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(LinuxUserConfiguration)} does not support writing '{options.Format}' format."); + } + } + + LinuxUserConfiguration IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeLinuxUserConfiguration(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(LinuxUserConfiguration)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static LinuxUserConfiguration FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeLinuxUserConfiguration(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/LinuxUserConfiguration.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/LinuxUserConfiguration.cs new file mode 100644 index 0000000000000..a24b99e10fcef --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/LinuxUserConfiguration.cs @@ -0,0 +1,73 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Properties used to create a user Account on a Linux Compute Node. + public partial class LinuxUserConfiguration + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public LinuxUserConfiguration() + { + } + + /// Initializes a new instance of . + /// The user ID of the user Account. The uid and gid properties must be specified together or not at all. If not specified the underlying operating system picks the uid. + /// The group ID for the user Account. The uid and gid properties must be specified together or not at all. If not specified the underlying operating system picks the gid. + /// The SSH private key for the user Account. The private key must not be password protected. The private key is used to automatically configure asymmetric-key based authentication for SSH between Compute Nodes in a Linux Pool when the Pool's enableInterNodeCommunication property is true (it is ignored if enableInterNodeCommunication is false). It does this by placing the key pair into the user's .ssh directory. If not specified, password-less SSH is not configured between Compute Nodes (no modification of the user's .ssh directory is done). + /// Keeps track of any properties unknown to the library. + internal LinuxUserConfiguration(int? uid, int? 
gid, string sshPrivateKey, IDictionary serializedAdditionalRawData) + { + Uid = uid; + Gid = gid; + SshPrivateKey = sshPrivateKey; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The user ID of the user Account. The uid and gid properties must be specified together or not at all. If not specified the underlying operating system picks the uid. + public int? Uid { get; set; } + /// The group ID for the user Account. The uid and gid properties must be specified together or not at all. If not specified the underlying operating system picks the gid. + public int? Gid { get; set; } + /// The SSH private key for the user Account. The private key must not be password protected. The private key is used to automatically configure asymmetric-key based authentication for SSH between Compute Nodes in a Linux Pool when the Pool's enableInterNodeCommunication property is true (it is ignored if enableInterNodeCommunication is false). It does this by placing the key pair into the user's .ssh directory. If not specified, password-less SSH is not configured between Compute Nodes (no modification of the user's .ssh directory is done). + public string SshPrivateKey { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/LoginMode.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/LoginMode.cs new file mode 100644 index 0000000000000..0de981032164b --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/LoginMode.cs @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// LoginMode enums. + public readonly partial struct LoginMode : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public LoginMode(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string BatchValue = "batch"; + private const string InteractiveValue = "interactive"; + + /// The LOGON32_LOGON_BATCH Win32 login mode. The batch login mode is recommended for long running parallel processes. + public static LoginMode Batch { get; } = new LoginMode(BatchValue); + /// The LOGON32_LOGON_INTERACTIVE Win32 login mode. UAC is enabled on Windows VirtualMachineConfiguration Pools. If this option is used with an elevated user identity in a Windows VirtualMachineConfiguration Pool, the user session will not be elevated unless the application executed by the Task command line is configured to always require administrative privilege or to always require maximum privilege. + public static LoginMode Interactive { get; } = new LoginMode(InteractiveValue); + /// Determines if two values are the same. + public static bool operator ==(LoginMode left, LoginMode right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(LoginMode left, LoginMode right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator LoginMode(string value) => new LoginMode(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is LoginMode other && Equals(other); + /// + public bool Equals(LoginMode other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
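A sketch of configuring a Linux user account per the parameter docs above: uid and gid must be supplied together or not at all, and the SSH private key must be unencrypted for the automatic node-to-node SSH setup to work. LoginMode, by contrast, applies to Windows accounts, where Batch is the recommended mode for long-running parallel processes.

var linuxUser = new LinuxUserConfiguration
{
    Uid = 1000,  // both or neither: a uid without a gid is rejected by the service
    Gid = 1000,
    // SshPrivateKey = "<unencrypted PEM>",  // only consulted when the pool enables inter-node communication
};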
0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ManagedDisk.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ManagedDisk.Serialization.cs new file mode 100644 index 0000000000000..b3cd49d9cb82b --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ManagedDisk.Serialization.cs @@ -0,0 +1,135 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class ManagedDisk : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ManagedDisk)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("storageAccountType"u8); + writer.WriteStringValue(StorageAccountType.ToString()); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ManagedDisk IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ManagedDisk)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeManagedDisk(document.RootElement, options); + } + + internal static ManagedDisk DeserializeManagedDisk(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + StorageAccountType storageAccountType = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("storageAccountType"u8)) + { + storageAccountType = new StorageAccountType(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new ManagedDisk(storageAccountType, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ManagedDisk)} does not support writing '{options.Format}' format."); + } + } + + ManagedDisk IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeManagedDisk(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ManagedDisk)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static ManagedDisk FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeManagedDisk(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ManagedDisk.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ManagedDisk.cs new file mode 100644 index 0000000000000..f8ada07e8f729 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ManagedDisk.cs @@ -0,0 +1,72 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// The managed disk parameters. + public partial class ManagedDisk + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The storage account type for managed disk. + public ManagedDisk(StorageAccountType storageAccountType) + { + StorageAccountType = storageAccountType; + } + + /// Initializes a new instance of . + /// The storage account type for managed disk. + /// Keeps track of any properties unknown to the library. + internal ManagedDisk(StorageAccountType storageAccountType, IDictionary serializedAdditionalRawData) + { + StorageAccountType = storageAccountType; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ManagedDisk() + { + } + + /// The storage account type for managed disk. 
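ManagedDisk wraps a single required StorageAccountType. The generated StorageAccountType values are not visible in this diff (the cspell additions hint at an SSD LRS entry), so this sketch goes through the struct's public string constructor with "premium_lrs" as an assumed wire value:

using System.ClientModel.Primitives;

var disk = new ManagedDisk(new StorageAccountType("premium_lrs")); // "premium_lrs" is an assumed service value

BinaryData body = ModelReaderWriter.Write(disk);
// body: {"storageAccountType":"premium_lrs"}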
+ public StorageAccountType StorageAccountType { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/MetadataItem.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/MetadataItem.Serialization.cs new file mode 100644 index 0000000000000..424b946c9ac81 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/MetadataItem.Serialization.cs @@ -0,0 +1,143 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class MetadataItem : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(MetadataItem)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("name"u8); + writer.WriteStringValue(Name); + writer.WritePropertyName("value"u8); + writer.WriteStringValue(Value); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + MetadataItem IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(MetadataItem)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeMetadataItem(document.RootElement, options); + } + + internal static MetadataItem DeserializeMetadataItem(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string name = default; + string value = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("name"u8)) + { + name = property.Value.GetString(); + continue; + } + if (property.NameEquals("value"u8)) + { + value = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new MetadataItem(name, value, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(MetadataItem)} does not support writing '{options.Format}' format."); + } + } + + MetadataItem IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeMetadataItem(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(MetadataItem)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static MetadataItem FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeMetadataItem(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/MetadataItem.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/MetadataItem.cs new file mode 100644 index 0000000000000..910a0b6b76209 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/MetadataItem.cs @@ -0,0 +1,85 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// + /// The Batch service does not assign any meaning to this metadata; it is solely + /// for the use of user code. + /// + public partial class MetadataItem + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The name of the metadata item. + /// The value of the metadata item. + /// or is null. + public MetadataItem(string name, string value) + { + Argument.AssertNotNull(name, nameof(name)); + Argument.AssertNotNull(value, nameof(value)); + + Name = name; + Value = value; + } + + /// Initializes a new instance of . + /// The name of the metadata item. + /// The value of the metadata item. + /// Keeps track of any properties unknown to the library. 
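// --- Editor's sketch, not part of the generated diff ---------------------
// MetadataItem is a plain name/value pair attached to Batch resources; the
// public constructor shown earlier in this file null-checks both arguments.
// The values here are illustrative only.
var item = new MetadataItem("department", "render-farm");
// -------------------------------------------------------------------------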
+ internal MetadataItem(string name, string value, IDictionary serializedAdditionalRawData) + { + Name = name; + Value = value; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal MetadataItem() + { + } + + /// The name of the metadata item. + public string Name { get; set; } + /// The value of the metadata item. + public string Value { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/MountConfiguration.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/MountConfiguration.Serialization.cs new file mode 100644 index 0000000000000..387392062b56c --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/MountConfiguration.Serialization.cs @@ -0,0 +1,187 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class MountConfiguration : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(MountConfiguration)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(AzureBlobFileSystemConfiguration)) + { + writer.WritePropertyName("azureBlobFileSystemConfiguration"u8); + writer.WriteObjectValue(AzureBlobFileSystemConfiguration, options); + } + if (Optional.IsDefined(NfsMountConfiguration)) + { + writer.WritePropertyName("nfsMountConfiguration"u8); + writer.WriteObjectValue(NfsMountConfiguration, options); + } + if (Optional.IsDefined(CifsMountConfiguration)) + { + writer.WritePropertyName("cifsMountConfiguration"u8); + writer.WriteObjectValue(CifsMountConfiguration, options); + } + if (Optional.IsDefined(AzureFileShareConfiguration)) + { + writer.WritePropertyName("azureFileShareConfiguration"u8); + writer.WriteObjectValue(AzureFileShareConfiguration, options); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + MountConfiguration IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(MountConfiguration)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeMountConfiguration(document.RootElement, options); + } + + internal static MountConfiguration DeserializeMountConfiguration(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + AzureBlobFileSystemConfiguration azureBlobFileSystemConfiguration = default; + NfsMountConfiguration nfsMountConfiguration = default; + CifsMountConfiguration cifsMountConfiguration = default; + AzureFileShareConfiguration azureFileShareConfiguration = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("azureBlobFileSystemConfiguration"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + azureBlobFileSystemConfiguration = AzureBlobFileSystemConfiguration.DeserializeAzureBlobFileSystemConfiguration(property.Value, options); + continue; + } + if (property.NameEquals("nfsMountConfiguration"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + nfsMountConfiguration = NfsMountConfiguration.DeserializeNfsMountConfiguration(property.Value, options); + continue; + } + if (property.NameEquals("cifsMountConfiguration"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + cifsMountConfiguration = CifsMountConfiguration.DeserializeCifsMountConfiguration(property.Value, options); + continue; + } + if (property.NameEquals("azureFileShareConfiguration"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + azureFileShareConfiguration = AzureFileShareConfiguration.DeserializeAzureFileShareConfiguration(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new MountConfiguration(azureBlobFileSystemConfiguration, nfsMountConfiguration, cifsMountConfiguration, azureFileShareConfiguration, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(MountConfiguration)} does not support writing '{options.Format}' format."); + } + } + + MountConfiguration IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeMountConfiguration(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(MountConfiguration)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static MountConfiguration FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeMountConfiguration(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/MountConfiguration.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/MountConfiguration.cs new file mode 100644 index 0000000000000..6a04472d6c941 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/MountConfiguration.cs @@ -0,0 +1,77 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// The file system to mount on each node. + public partial class MountConfiguration + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public MountConfiguration() + { + } + + /// Initializes a new instance of . + /// The Azure Storage Container to mount using blob FUSE on each node. This property is mutually exclusive with all other properties. + /// The NFS file system to mount on each node. This property is mutually exclusive with all other properties. + /// The CIFS/SMB file system to mount on each node. This property is mutually exclusive with all other properties. + /// The Azure File Share to mount on each node. This property is mutually exclusive with all other properties. + /// Keeps track of any properties unknown to the library. 
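// --- Editor's sketch, not part of the generated diff ---------------------
// Per the property docs in this file, the four mount sources are mutually
// exclusive, so exactly one should be set. NfsMountConfiguration's
// (source, relativeMountPath) constructor is an assumption inferred from
// its required serialized fields; that constructor is not shown in this diff.
var mount = new MountConfiguration
{
    NfsMountConfiguration = new NfsMountConfiguration("10.0.0.4:/exports/data", "nfs")
};
// -------------------------------------------------------------------------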
+ internal MountConfiguration(AzureBlobFileSystemConfiguration azureBlobFileSystemConfiguration, NfsMountConfiguration nfsMountConfiguration, CifsMountConfiguration cifsMountConfiguration, AzureFileShareConfiguration azureFileShareConfiguration, IDictionary serializedAdditionalRawData) + { + AzureBlobFileSystemConfiguration = azureBlobFileSystemConfiguration; + NfsMountConfiguration = nfsMountConfiguration; + CifsMountConfiguration = cifsMountConfiguration; + AzureFileShareConfiguration = azureFileShareConfiguration; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The Azure Storage Container to mount using blob FUSE on each node. This property is mutually exclusive with all other properties. + public AzureBlobFileSystemConfiguration AzureBlobFileSystemConfiguration { get; set; } + /// The NFS file system to mount on each node. This property is mutually exclusive with all other properties. + public NfsMountConfiguration NfsMountConfiguration { get; set; } + /// The CIFS/SMB file system to mount on each node. This property is mutually exclusive with all other properties. + public CifsMountConfiguration CifsMountConfiguration { get; set; } + /// The Azure File Share to mount on each node. This property is mutually exclusive with all other properties. + public AzureFileShareConfiguration AzureFileShareConfiguration { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/MultiInstanceSettings.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/MultiInstanceSettings.Serialization.cs new file mode 100644 index 0000000000000..1b72767d544d7 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/MultiInstanceSettings.Serialization.cs @@ -0,0 +1,175 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class MultiInstanceSettings : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(MultiInstanceSettings)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(NumberOfInstances)) + { + writer.WritePropertyName("numberOfInstances"u8); + writer.WriteNumberValue(NumberOfInstances.Value); + } + writer.WritePropertyName("coordinationCommandLine"u8); + writer.WriteStringValue(CoordinationCommandLine); + if (Optional.IsCollectionDefined(CommonResourceFiles)) + { + writer.WritePropertyName("commonResourceFiles"u8); + writer.WriteStartArray(); + foreach (var item in CommonResourceFiles) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + MultiInstanceSettings IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(MultiInstanceSettings)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeMultiInstanceSettings(document.RootElement, options); + } + + internal static MultiInstanceSettings DeserializeMultiInstanceSettings(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + int? numberOfInstances = default; + string coordinationCommandLine = default; + IList commonResourceFiles = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("numberOfInstances"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + numberOfInstances = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("coordinationCommandLine"u8)) + { + coordinationCommandLine = property.Value.GetString(); + continue; + } + if (property.NameEquals("commonResourceFiles"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(ResourceFile.DeserializeResourceFile(item, options)); + } + commonResourceFiles = array; + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new MultiInstanceSettings(numberOfInstances, coordinationCommandLine, commonResourceFiles ?? new ChangeTrackingList(), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(MultiInstanceSettings)} does not support writing '{options.Format}' format."); + } + } + + MultiInstanceSettings IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeMultiInstanceSettings(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(MultiInstanceSettings)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static MultiInstanceSettings FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeMultiInstanceSettings(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/MultiInstanceSettings.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/MultiInstanceSettings.cs new file mode 100644 index 0000000000000..754506e8d5a54 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/MultiInstanceSettings.cs @@ -0,0 +1,89 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// + /// Multi-instance Tasks are commonly used to support MPI Tasks. In the MPI case, + /// if any of the subtasks fail (for example due to exiting with a non-zero exit + /// code) the entire multi-instance Task fails. The multi-instance Task is then + /// terminated and retried, up to its retry limit. + /// + public partial class MultiInstanceSettings + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The command line to run on all the Compute Nodes to enable them to coordinate when the primary runs the main Task command. A typical coordination command line launches a background service and verifies that the service is ready to process inter-node messages. + /// is null. 
+ public MultiInstanceSettings(string coordinationCommandLine) + { + Argument.AssertNotNull(coordinationCommandLine, nameof(coordinationCommandLine)); + + CoordinationCommandLine = coordinationCommandLine; + CommonResourceFiles = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// The number of Compute Nodes required by the Task. If omitted, the default is 1. + /// The command line to run on all the Compute Nodes to enable them to coordinate when the primary runs the main Task command. A typical coordination command line launches a background service and verifies that the service is ready to process inter-node messages. + /// A list of files that the Batch service will download before running the coordination command line. The difference between common resource files and Task resource files is that common resource files are downloaded for all subtasks including the primary, whereas Task resource files are downloaded only for the primary. Also note that these resource files are not downloaded to the Task working directory, but instead are downloaded to the Task root directory (one directory above the working directory). There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. + /// Keeps track of any properties unknown to the library. + internal MultiInstanceSettings(int? numberOfInstances, string coordinationCommandLine, IList commonResourceFiles, IDictionary serializedAdditionalRawData) + { + NumberOfInstances = numberOfInstances; + CoordinationCommandLine = coordinationCommandLine; + CommonResourceFiles = commonResourceFiles; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal MultiInstanceSettings() + { + } + + /// The number of Compute Nodes required by the Task. If omitted, the default is 1. + public int? NumberOfInstances { get; set; } + /// The command line to run on all the Compute Nodes to enable them to coordinate when the primary runs the main Task command. A typical coordination command line launches a background service and verifies that the service is ready to process inter-node messages. + public string CoordinationCommandLine { get; set; } + /// A list of files that the Batch service will download before running the coordination command line. The difference between common resource files and Task resource files is that common resource files are downloaded for all subtasks including the primary, whereas Task resource files are downloaded only for the primary. Also note that these resource files are not downloaded to the Task working directory, but instead are downloaded to the Task root directory (one directory above the working directory). There is a maximum size for the list of resource files. When the max size is exceeded, the request will fail and the response error code will be RequestEntityTooLarge. If this occurs, the collection of ResourceFiles must be reduced in size. This can be achieved using .zip files, Application Packages, or Docker Containers. 
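// --- Editor's sketch, not part of the generated diff ---------------------
// A typical MPI-style configuration: only the coordination command line is
// required, and NumberOfInstances defaults to 1 when omitted. The
// CommonResourceFiles list declared just below is get-only, so it is
// populated with Add rather than assignment. The command line is illustrative.
var settings = new MultiInstanceSettings("cmd /c start coordinationservice.exe")
{
    NumberOfInstances = 3,
};
// -------------------------------------------------------------------------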
+ public IList CommonResourceFiles { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/NameValuePair.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/NameValuePair.Serialization.cs new file mode 100644 index 0000000000000..df9413cc5c250 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/NameValuePair.Serialization.cs @@ -0,0 +1,149 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class NameValuePair : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(NameValuePair)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(Name)) + { + writer.WritePropertyName("name"u8); + writer.WriteStringValue(Name); + } + if (Optional.IsDefined(Value)) + { + writer.WritePropertyName("value"u8); + writer.WriteStringValue(Value); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + NameValuePair IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(NameValuePair)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeNameValuePair(document.RootElement, options); + } + + internal static NameValuePair DeserializeNameValuePair(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string name = default; + string value = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("name"u8)) + { + name = property.Value.GetString(); + continue; + } + if (property.NameEquals("value"u8)) + { + value = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new NameValuePair(name, value, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(NameValuePair)} does not support writing '{options.Format}' format."); + } + } + + NameValuePair IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeNameValuePair(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(NameValuePair)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static NameValuePair FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeNameValuePair(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/NameValuePair.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/NameValuePair.cs new file mode 100644 index 0000000000000..93f43a1e217dc --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/NameValuePair.cs @@ -0,0 +1,69 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Represents a name-value pair. + public partial class NameValuePair + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public NameValuePair() + { + } + + /// Initializes a new instance of . + /// The name in the name-value pair. + /// The value in the name-value pair. + /// Keeps track of any properties unknown to the library. + internal NameValuePair(string name, string value, IDictionary serializedAdditionalRawData) + { + Name = name; + Value = value; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The name in the name-value pair. + public string Name { get; set; } + /// The value in the name-value pair. 
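// --- Editor's sketch, not part of the generated diff ---------------------
// NameValuePair mostly carries service output such as error details, but
// both properties are settable, so a test or sample can build one directly.
// The values are illustrative only.
var pair = new NameValuePair { Name = "Reason", Value = "HostTerminated" };
// -------------------------------------------------------------------------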
+ public string Value { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/NetworkConfiguration.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/NetworkConfiguration.Serialization.cs new file mode 100644 index 0000000000000..36ede0f652592 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/NetworkConfiguration.Serialization.cs @@ -0,0 +1,204 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class NetworkConfiguration : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(NetworkConfiguration)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(SubnetId)) + { + writer.WritePropertyName("subnetId"u8); + writer.WriteStringValue(SubnetId); + } + if (Optional.IsDefined(DynamicVNetAssignmentScope)) + { + writer.WritePropertyName("dynamicVNetAssignmentScope"u8); + writer.WriteStringValue(DynamicVNetAssignmentScope.Value.ToString()); + } + if (Optional.IsDefined(EndpointConfiguration)) + { + writer.WritePropertyName("endpointConfiguration"u8); + writer.WriteObjectValue(EndpointConfiguration, options); + } + if (Optional.IsDefined(PublicIpAddressConfiguration)) + { + writer.WritePropertyName("publicIPAddressConfiguration"u8); + writer.WriteObjectValue(PublicIpAddressConfiguration, options); + } + if (Optional.IsDefined(EnableAcceleratedNetworking)) + { + writer.WritePropertyName("enableAcceleratedNetworking"u8); + writer.WriteBooleanValue(EnableAcceleratedNetworking.Value); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + NetworkConfiguration IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(NetworkConfiguration)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeNetworkConfiguration(document.RootElement, options); + } + + internal static NetworkConfiguration DeserializeNetworkConfiguration(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string subnetId = default; + DynamicVNetAssignmentScope? 
dynamicVNetAssignmentScope = default; + BatchPoolEndpointConfiguration endpointConfiguration = default; + PublicIpAddressConfiguration publicIPAddressConfiguration = default; + bool? enableAcceleratedNetworking = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("subnetId"u8)) + { + subnetId = property.Value.GetString(); + continue; + } + if (property.NameEquals("dynamicVNetAssignmentScope"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + dynamicVNetAssignmentScope = new DynamicVNetAssignmentScope(property.Value.GetString()); + continue; + } + if (property.NameEquals("endpointConfiguration"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + endpointConfiguration = BatchPoolEndpointConfiguration.DeserializeBatchPoolEndpointConfiguration(property.Value, options); + continue; + } + if (property.NameEquals("publicIPAddressConfiguration"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + publicIPAddressConfiguration = PublicIpAddressConfiguration.DeserializePublicIpAddressConfiguration(property.Value, options); + continue; + } + if (property.NameEquals("enableAcceleratedNetworking"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + enableAcceleratedNetworking = property.Value.GetBoolean(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new NetworkConfiguration( + subnetId, + dynamicVNetAssignmentScope, + endpointConfiguration, + publicIPAddressConfiguration, + enableAcceleratedNetworking, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(NetworkConfiguration)} does not support writing '{options.Format}' format."); + } + } + + NetworkConfiguration IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeNetworkConfiguration(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(NetworkConfiguration)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static NetworkConfiguration FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeNetworkConfiguration(document.RootElement); + } + + /// Convert into a . 
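// --- Editor's sketch, not part of the generated diff ---------------------
// Because these models implement IJsonModel<T>/IPersistableModel<T>, they
// can be round-tripped through System.ClientModel's ModelReaderWriter with
// the default JSON ("J") format, without going through a client (assumes
// `using System;` and `using System.ClientModel.Primitives;`).
var config = new NetworkConfiguration { EnableAcceleratedNetworking = true };
BinaryData json = ModelReaderWriter.Write(config);
NetworkConfiguration copy = ModelReaderWriter.Read<NetworkConfiguration>(json);
// -------------------------------------------------------------------------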
+ internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/NetworkConfiguration.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/NetworkConfiguration.cs new file mode 100644 index 0000000000000..e69a292a35016 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/NetworkConfiguration.cs @@ -0,0 +1,81 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// The network configuration for a Pool. + public partial class NetworkConfiguration + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public NetworkConfiguration() + { + } + + /// Initializes a new instance of . + /// The ARM resource identifier of the virtual network subnet which the Compute Nodes of the Pool will join. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have enough free IP addresses, the Pool will partially allocate Nodes and a resize error will occur. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. The specified subnet must allow communication from the Azure Batch service to be able to schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. For Pools created with virtualMachineConfiguration only ARM virtual networks ('Microsoft.Network/virtualNetworks') are supported. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication. For Pools created with a virtual machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Also enable outbound connections to Azure Storage on port 443. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + /// The scope of dynamic vnet assignment. + /// The configuration for endpoints on Compute Nodes in the Batch Pool. 
Pool endpoint configuration is only supported on Pools with the virtualMachineConfiguration property. + /// The Public IPAddress configuration for Compute Nodes in the Batch Pool. Public IP configuration property is only supported on Pools with the virtualMachineConfiguration property. + /// Whether this pool should enable accelerated networking. Accelerated networking enables single root I/O virtualization (SR-IOV) to a VM, which may lead to improved networking performance. For more details, see: https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. + /// Keeps track of any properties unknown to the library. + internal NetworkConfiguration(string subnetId, DynamicVNetAssignmentScope? dynamicVNetAssignmentScope, BatchPoolEndpointConfiguration endpointConfiguration, PublicIpAddressConfiguration publicIpAddressConfiguration, bool? enableAcceleratedNetworking, IDictionary serializedAdditionalRawData) + { + SubnetId = subnetId; + DynamicVNetAssignmentScope = dynamicVNetAssignmentScope; + EndpointConfiguration = endpointConfiguration; + PublicIpAddressConfiguration = publicIpAddressConfiguration; + EnableAcceleratedNetworking = enableAcceleratedNetworking; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The ARM resource identifier of the virtual network subnet which the Compute Nodes of the Pool will join. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have enough free IP addresses, the Pool will partially allocate Nodes and a resize error will occur. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. The specified subnet must allow communication from the Azure Batch service to be able to schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. For Pools created with virtualMachineConfiguration only ARM virtual networks ('Microsoft.Network/virtualNetworks') are supported. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication. For Pools created with a virtual machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Also enable outbound connections to Azure Storage on port 443. For more details see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + public string SubnetId { get; set; } + /// The scope of dynamic vnet assignment. + public DynamicVNetAssignmentScope? DynamicVNetAssignmentScope { get; set; } + /// The configuration for endpoints on Compute Nodes in the Batch Pool. Pool endpoint configuration is only supported on Pools with the virtualMachineConfiguration property. + public BatchPoolEndpointConfiguration EndpointConfiguration { get; set; } + /// The Public IPAddress configuration for Compute Nodes in the Batch Pool. 
Public IP configuration property is only supported on Pools with the virtualMachineConfiguration property. + public PublicIpAddressConfiguration PublicIpAddressConfiguration { get; set; } + /// Whether this pool should enable accelerated networking. Accelerated networking enables single root I/O virtualization (SR-IOV) to a VM, which may lead to improved networking performance. For more details, see: https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. + public bool? EnableAcceleratedNetworking { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/NetworkSecurityGroupRule.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/NetworkSecurityGroupRule.Serialization.cs new file mode 100644 index 0000000000000..efdb0b503e6f5 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/NetworkSecurityGroupRule.Serialization.cs @@ -0,0 +1,176 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class NetworkSecurityGroupRule : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(NetworkSecurityGroupRule)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("priority"u8); + writer.WriteNumberValue(Priority); + writer.WritePropertyName("access"u8); + writer.WriteStringValue(Access.ToString()); + writer.WritePropertyName("sourceAddressPrefix"u8); + writer.WriteStringValue(SourceAddressPrefix); + if (Optional.IsCollectionDefined(SourcePortRanges)) + { + writer.WritePropertyName("sourcePortRanges"u8); + writer.WriteStartArray(); + foreach (var item in SourcePortRanges) + { + writer.WriteStringValue(item); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + NetworkSecurityGroupRule IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(NetworkSecurityGroupRule)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeNetworkSecurityGroupRule(document.RootElement, options); + } + + internal static NetworkSecurityGroupRule DeserializeNetworkSecurityGroupRule(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + int priority = default; + NetworkSecurityGroupRuleAccess access = default; + string sourceAddressPrefix = default; + IList sourcePortRanges = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("priority"u8)) + { + priority = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("access"u8)) + { + access = new NetworkSecurityGroupRuleAccess(property.Value.GetString()); + continue; + } + if (property.NameEquals("sourceAddressPrefix"u8)) + { + sourceAddressPrefix = property.Value.GetString(); + continue; + } + if (property.NameEquals("sourcePortRanges"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(item.GetString()); + } + sourcePortRanges = array; + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new NetworkSecurityGroupRule(priority, access, sourceAddressPrefix, sourcePortRanges ?? new ChangeTrackingList(), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(NetworkSecurityGroupRule)} does not support writing '{options.Format}' format."); + } + } + + NetworkSecurityGroupRule IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeNetworkSecurityGroupRule(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(NetworkSecurityGroupRule)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static NetworkSecurityGroupRule FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeNetworkSecurityGroupRule(document.RootElement); + } + + /// Convert into a . 
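// --- Editor's sketch, not part of the generated diff ---------------------
// Building an inbound rule within the constraints documented on the model
// that follows: priorities 150-4096, '*' to match all source addresses.
// SourcePortRanges is a get-only list, so entries are added, and each entry
// must not overlap any other.
var rule = new NetworkSecurityGroupRule(150, NetworkSecurityGroupRuleAccess.Allow, "*");
rule.SourcePortRanges.Add("100-200");
// -------------------------------------------------------------------------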
+ internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/NetworkSecurityGroupRule.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/NetworkSecurityGroupRule.cs new file mode 100644 index 0000000000000..5716920127432 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/NetworkSecurityGroupRule.cs @@ -0,0 +1,92 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// A network security group rule to apply to an inbound endpoint. + public partial class NetworkSecurityGroupRule + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The priority for this rule. Priorities within a Pool must be unique and are evaluated in order of priority. The lower the number the higher the priority. For example, rules could be specified with order numbers of 150, 250, and 350. The rule with the order number of 150 takes precedence over the rule that has an order of 250. Allowed priorities are 150 to 4096. If any reserved or duplicate values are provided the request fails with HTTP status code 400. + /// The action that should be taken for a specified IP address, subnet range or tag. + /// The source address prefix or tag to match for the rule. Valid values are a single IP address (i.e. 10.10.10.10), IP subnet (i.e. 192.168.1.0/24), default tag, or * (for all addresses). If any other values are provided the request fails with HTTP status code 400. + /// is null. + public NetworkSecurityGroupRule(int priority, NetworkSecurityGroupRuleAccess access, string sourceAddressPrefix) + { + Argument.AssertNotNull(sourceAddressPrefix, nameof(sourceAddressPrefix)); + + Priority = priority; + Access = access; + SourceAddressPrefix = sourceAddressPrefix; + SourcePortRanges = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// The priority for this rule. Priorities within a Pool must be unique and are evaluated in order of priority. The lower the number the higher the priority. For example, rules could be specified with order numbers of 150, 250, and 350. The rule with the order number of 150 takes precedence over the rule that has an order of 250. Allowed priorities are 150 to 4096. If any reserved or duplicate values are provided the request fails with HTTP status code 400. + /// The action that should be taken for a specified IP address, subnet range or tag. + /// The source address prefix or tag to match for the rule. Valid values are a single IP address (i.e. 10.10.10.10), IP subnet (i.e. 
192.168.1.0/24), default tag, or * (for all addresses). If any other values are provided the request fails with HTTP status code 400. + /// The source port ranges to match for the rule. Valid values are '*' (for all ports 0 - 65535), a specific port (i.e. 22), or a port range (i.e. 100-200). The ports must be in the range of 0 to 65535. Each entry in this collection must not overlap any other entry (either a range or an individual port). If any other values are provided the request fails with HTTP status code 400. The default value is '*'. + /// Keeps track of any properties unknown to the library. + internal NetworkSecurityGroupRule(int priority, NetworkSecurityGroupRuleAccess access, string sourceAddressPrefix, IList sourcePortRanges, IDictionary serializedAdditionalRawData) + { + Priority = priority; + Access = access; + SourceAddressPrefix = sourceAddressPrefix; + SourcePortRanges = sourcePortRanges; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal NetworkSecurityGroupRule() + { + } + + /// The priority for this rule. Priorities within a Pool must be unique and are evaluated in order of priority. The lower the number the higher the priority. For example, rules could be specified with order numbers of 150, 250, and 350. The rule with the order number of 150 takes precedence over the rule that has an order of 250. Allowed priorities are 150 to 4096. If any reserved or duplicate values are provided the request fails with HTTP status code 400. + public int Priority { get; set; } + /// The action that should be taken for a specified IP address, subnet range or tag. + public NetworkSecurityGroupRuleAccess Access { get; set; } + /// The source address prefix or tag to match for the rule. Valid values are a single IP address (i.e. 10.10.10.10), IP subnet (i.e. 192.168.1.0/24), default tag, or * (for all addresses). If any other values are provided the request fails with HTTP status code 400. + public string SourceAddressPrefix { get; set; } + /// The source port ranges to match for the rule. Valid values are '*' (for all ports 0 - 65535), a specific port (i.e. 22), or a port range (i.e. 100-200). The ports must be in the range of 0 to 65535. Each entry in this collection must not overlap any other entry (either a range or an individual port). If any other values are provided the request fails with HTTP status code 400. The default value is '*'. + public IList SourcePortRanges { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/NetworkSecurityGroupRuleAccess.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/NetworkSecurityGroupRuleAccess.cs new file mode 100644 index 0000000000000..9688b1326d842 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/NetworkSecurityGroupRuleAccess.cs @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// NetworkSecurityGroupRuleAccess enums. + public readonly partial struct NetworkSecurityGroupRuleAccess : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public NetworkSecurityGroupRuleAccess(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string AllowValue = "allow"; + private const string DenyValue = "deny"; + + /// Allow access. 
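// A construction sketch for the NetworkSecurityGroupRule model defined above.
// Grounded in the constructor and property docs: priority must be 150-4096 with
// lower numbers taking precedence, and SourcePortRanges defaults to '*' when empty.
using Azure.Compute.Batch;

internal static class NetworkSecurityGroupRuleExample
{
    internal static NetworkSecurityGroupRule BuildDenyRule()
    {
        // Deny inbound traffic from one subnet at the highest allowed precedence (150).
        var rule = new NetworkSecurityGroupRule(150, NetworkSecurityGroupRuleAccess.Deny, "192.168.1.0/24");
        rule.SourcePortRanges.Add("100-200"); // optional; omitting it means all ports ('*')
        return rule;
    }
}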
+ public static NetworkSecurityGroupRuleAccess Allow { get; } = new NetworkSecurityGroupRuleAccess(AllowValue); + /// Deny access. + public static NetworkSecurityGroupRuleAccess Deny { get; } = new NetworkSecurityGroupRuleAccess(DenyValue); + /// Determines if two values are the same. + public static bool operator ==(NetworkSecurityGroupRuleAccess left, NetworkSecurityGroupRuleAccess right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(NetworkSecurityGroupRuleAccess left, NetworkSecurityGroupRuleAccess right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator NetworkSecurityGroupRuleAccess(string value) => new NetworkSecurityGroupRuleAccess(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is NetworkSecurityGroupRuleAccess other && Equals(other); + /// + public bool Equals(NetworkSecurityGroupRuleAccess other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/NfsMountConfiguration.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/NfsMountConfiguration.Serialization.cs new file mode 100644 index 0000000000000..653d3f6cd0a12 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/NfsMountConfiguration.Serialization.cs @@ -0,0 +1,154 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class NfsMountConfiguration : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(NfsMountConfiguration)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("source"u8); + writer.WriteStringValue(Source); + writer.WritePropertyName("relativeMountPath"u8); + writer.WriteStringValue(RelativeMountPath); + if (Optional.IsDefined(MountOptions)) + { + writer.WritePropertyName("mountOptions"u8); + writer.WriteStringValue(MountOptions); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + NfsMountConfiguration IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(NfsMountConfiguration)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeNfsMountConfiguration(document.RootElement, options); + } + + internal static NfsMountConfiguration DeserializeNfsMountConfiguration(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string source = default; + string relativeMountPath = default; + string mountOptions = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("source"u8)) + { + source = property.Value.GetString(); + continue; + } + if (property.NameEquals("relativeMountPath"u8)) + { + relativeMountPath = property.Value.GetString(); + continue; + } + if (property.NameEquals("mountOptions"u8)) + { + mountOptions = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new NfsMountConfiguration(source, relativeMountPath, mountOptions, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(NfsMountConfiguration)} does not support writing '{options.Format}' format."); + } + } + + NfsMountConfiguration IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeNfsMountConfiguration(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(NfsMountConfiguration)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static NfsMountConfiguration FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeNfsMountConfiguration(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/NfsMountConfiguration.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/NfsMountConfiguration.cs new file mode 100644 index 0000000000000..dd8f27f7aed9c --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/NfsMountConfiguration.cs @@ -0,0 +1,86 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Information used to connect to an NFS file system. + public partial class NfsMountConfiguration + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The URI of the file system to mount. + /// The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. + /// or is null. + public NfsMountConfiguration(string source, string relativeMountPath) + { + Argument.AssertNotNull(source, nameof(source)); + Argument.AssertNotNull(relativeMountPath, nameof(relativeMountPath)); + + Source = source; + RelativeMountPath = relativeMountPath; + } + + /// Initializes a new instance of . + /// The URI of the file system to mount. + /// The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. + /// Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux. + /// Keeps track of any properties unknown to the library. + internal NfsMountConfiguration(string source, string relativeMountPath, string mountOptions, IDictionary serializedAdditionalRawData) + { + Source = source; + RelativeMountPath = relativeMountPath; + MountOptions = mountOptions; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal NfsMountConfiguration() + { + } + + /// The URI of the file system to mount. + public string Source { get; set; } + /// The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. + public string RelativeMountPath { get; set; } + /// Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux. + public string MountOptions { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/OSDisk.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/OSDisk.Serialization.cs new file mode 100644 index 0000000000000..02a6a6a39c0d8 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/OSDisk.Serialization.cs @@ -0,0 +1,208 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
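// A usage sketch for the NfsMountConfiguration model completed above; the share
// address is a placeholder, and MountOptions is passed straight to 'mount' on Linux
// ('net use' on Windows), per the property docs.
using Azure.Compute.Batch;

internal static class NfsMountExample
{
    internal static NfsMountConfiguration BuildMount()
    {
        // The node mounts this relative to the AZ_BATCH_NODE_MOUNTS_DIR directory.
        return new NfsMountConfiguration("10.0.0.4:/exports/data", "datashare")
        {
            MountOptions = "-o vers=4.1,sec=sys" // optional extra flags; assumption: NFSv4.1 share
        };
    }
}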
+ +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class OSDisk : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(OSDisk)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(EphemeralOSDiskSettings)) + { + writer.WritePropertyName("ephemeralOSDiskSettings"u8); + writer.WriteObjectValue(EphemeralOSDiskSettings, options); + } + if (Optional.IsDefined(Caching)) + { + writer.WritePropertyName("caching"u8); + writer.WriteStringValue(Caching.Value.ToString()); + } + if (Optional.IsDefined(DiskSizeGB)) + { + writer.WritePropertyName("diskSizeGB"u8); + writer.WriteNumberValue(DiskSizeGB.Value); + } + if (Optional.IsDefined(ManagedDisk)) + { + writer.WritePropertyName("managedDisk"u8); + writer.WriteObjectValue(ManagedDisk, options); + } + if (Optional.IsDefined(WriteAcceleratorEnabled)) + { + writer.WritePropertyName("writeAcceleratorEnabled"u8); + writer.WriteBooleanValue(WriteAcceleratorEnabled.Value); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + OSDisk IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(OSDisk)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeOSDisk(document.RootElement, options); + } + + internal static OSDisk DeserializeOSDisk(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + DiffDiskSettings ephemeralOSDiskSettings = default; + CachingType? caching = default; + int? diskSizeGB = default; + ManagedDisk managedDisk = default; + bool? 
writeAcceleratorEnabled = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("ephemeralOSDiskSettings"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + ephemeralOSDiskSettings = DiffDiskSettings.DeserializeDiffDiskSettings(property.Value, options); + continue; + } + if (property.NameEquals("caching"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + caching = new CachingType(property.Value.GetString()); + continue; + } + if (property.NameEquals("diskSizeGB"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + diskSizeGB = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("managedDisk"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + managedDisk = ManagedDisk.DeserializeManagedDisk(property.Value, options); + continue; + } + if (property.NameEquals("writeAcceleratorEnabled"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + writeAcceleratorEnabled = property.Value.GetBoolean(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new OSDisk( + ephemeralOSDiskSettings, + caching, + diskSizeGB, + managedDisk, + writeAcceleratorEnabled, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(OSDisk)} does not support writing '{options.Format}' format."); + } + } + + OSDisk IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeOSDisk(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(OSDisk)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static OSDisk FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeOSDisk(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/OSDisk.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/OSDisk.cs new file mode 100644 index 0000000000000..2c11fc67cba95 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/OSDisk.cs @@ -0,0 +1,81 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
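// A configuration sketch for the OSDisk model defined below. Every member is
// optional; the named caching values (None/ReadOnly/ReadWrite) come from the
// property docs, and the lowercase wire string passed to CachingType's public
// string constructor is an assumption.
using Azure.Compute.Batch;

internal static class OSDiskExample
{
    internal static OSDisk BuildOSDisk()
    {
        return new OSDisk
        {
            DiskSizeGB = 128,                       // initial OS disk size in GB
            Caching = new CachingType("readwrite"), // assumed wire value for ReadWrite
            WriteAcceleratorEnabled = false
        };
    }
}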
+ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Settings for the operating system disk of the compute node (VM). + public partial class OSDisk + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public OSDisk() + { + } + + /// Initializes a new instance of . + /// Specifies the ephemeral Disk Settings for the operating system disk used by the compute node (VM). + /// Specifies the caching requirements. Possible values are: None, ReadOnly, ReadWrite. The default values are: None for Standard storage. ReadOnly for Premium storage. + /// The initial disk size in GB when creating new OS disk. + /// The managed disk parameters. + /// Specifies whether writeAccelerator should be enabled or disabled on the disk. + /// Keeps track of any properties unknown to the library. + internal OSDisk(DiffDiskSettings ephemeralOSDiskSettings, CachingType? caching, int? diskSizeGB, ManagedDisk managedDisk, bool? writeAcceleratorEnabled, IDictionary serializedAdditionalRawData) + { + EphemeralOSDiskSettings = ephemeralOSDiskSettings; + Caching = caching; + DiskSizeGB = diskSizeGB; + ManagedDisk = managedDisk; + WriteAcceleratorEnabled = writeAcceleratorEnabled; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Specifies the ephemeral Disk Settings for the operating system disk used by the compute node (VM). + public DiffDiskSettings EphemeralOSDiskSettings { get; set; } + /// Specifies the caching requirements. Possible values are: None, ReadOnly, ReadWrite. The default values are: None for Standard storage. ReadOnly for Premium storage. + public CachingType? Caching { get; set; } + /// The initial disk size in GB when creating new OS disk. + public int? DiskSizeGB { get; set; } + /// The managed disk parameters. + public ManagedDisk ManagedDisk { get; set; } + /// Specifies whether writeAccelerator should be enabled or disabled on the disk. + public bool? WriteAcceleratorEnabled { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/OSType.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/OSType.cs new file mode 100644 index 0000000000000..c0e899131ee34 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/OSType.cs @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// OSType enums. + public readonly partial struct OSType : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public OSType(string value) + { + _value = value ?? 
throw new ArgumentNullException(nameof(value)); + } + + private const string LinuxValue = "linux"; + private const string WindowsValue = "windows"; + + /// The Linux operating system. + public static OSType Linux { get; } = new OSType(LinuxValue); + /// The Windows operating system. + public static OSType Windows { get; } = new OSType(WindowsValue); + /// Determines if two values are the same. + public static bool operator ==(OSType left, OSType right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(OSType left, OSType right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator OSType(string value) => new OSType(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is OSType other && Equals(other); + /// + public bool Equals(OSType other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/OnAllBatchTasksComplete.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/OnAllBatchTasksComplete.cs new file mode 100644 index 0000000000000..20a88b7cef207 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/OnAllBatchTasksComplete.cs @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// The action the Batch service should take when all Tasks in the Job are in the completed state. + public readonly partial struct OnAllBatchTasksComplete : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public OnAllBatchTasksComplete(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string NoActionValue = "noaction"; + private const string TerminateJobValue = "terminatejob"; + + /// Do nothing. The Job remains active unless terminated or disabled by some other means. + public static OnAllBatchTasksComplete NoAction { get; } = new OnAllBatchTasksComplete(NoActionValue); + /// Terminate the Job. The Job's terminationReason is set to 'AllTasksComplete'. + public static OnAllBatchTasksComplete TerminateJob { get; } = new OnAllBatchTasksComplete(TerminateJobValue); + /// Determines if two values are the same. + public static bool operator ==(OnAllBatchTasksComplete left, OnAllBatchTasksComplete right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(OnAllBatchTasksComplete left, OnAllBatchTasksComplete right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator OnAllBatchTasksComplete(string value) => new OnAllBatchTasksComplete(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is OnAllBatchTasksComplete other && Equals(other); + /// + public bool Equals(OnAllBatchTasksComplete other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/OnBatchTaskFailure.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/OnBatchTaskFailure.cs new file mode 100644 index 0000000000000..918fbb8985e50 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/OnBatchTaskFailure.cs @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// OnBatchTaskFailure enums. + public readonly partial struct OnBatchTaskFailure : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public OnBatchTaskFailure(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string NoActionValue = "noaction"; + private const string PerformExitOptionsJobActionValue = "performexitoptionsjobaction"; + + /// Do nothing. The Job remains active unless terminated or disabled by some other means. + public static OnBatchTaskFailure NoAction { get; } = new OnBatchTaskFailure(NoActionValue); + /// Take the action associated with the Task exit condition in the Task's exitConditions collection. (This may still result in no action being taken, if that is what the Task specifies.) + public static OnBatchTaskFailure PerformExitOptionsJobAction { get; } = new OnBatchTaskFailure(PerformExitOptionsJobActionValue); + /// Determines if two values are the same. + public static bool operator ==(OnBatchTaskFailure left, OnBatchTaskFailure right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(OnBatchTaskFailure left, OnBatchTaskFailure right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator OnBatchTaskFailure(string value) => new OnBatchTaskFailure(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is OnBatchTaskFailure other && Equals(other); + /// + public bool Equals(OnBatchTaskFailure other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFile.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFile.Serialization.cs new file mode 100644 index 0000000000000..070e9004d0dbc --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFile.Serialization.cs @@ -0,0 +1,151 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class OutputFile : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ?
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(OutputFile)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("filePattern"u8); + writer.WriteStringValue(FilePattern); + writer.WritePropertyName("destination"u8); + writer.WriteObjectValue(Destination, options); + writer.WritePropertyName("uploadOptions"u8); + writer.WriteObjectValue(UploadOptions, options); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + OutputFile IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(OutputFile)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeOutputFile(document.RootElement, options); + } + + internal static OutputFile DeserializeOutputFile(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string filePattern = default; + OutputFileDestination destination = default; + OutputFileUploadConfig uploadOptions = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("filePattern"u8)) + { + filePattern = property.Value.GetString(); + continue; + } + if (property.NameEquals("destination"u8)) + { + destination = OutputFileDestination.DeserializeOutputFileDestination(property.Value, options); + continue; + } + if (property.NameEquals("uploadOptions"u8)) + { + uploadOptions = OutputFileUploadConfig.DeserializeOutputFileUploadConfig(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new OutputFile(filePattern, destination, uploadOptions, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(OutputFile)} does not support writing '{options.Format}' format."); + } + } + + OutputFile IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeOutputFile(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(OutputFile)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static OutputFile FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeOutputFile(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFile.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFile.cs new file mode 100644 index 0000000000000..cb6966bffce1d --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFile.cs @@ -0,0 +1,89 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// On every file upload, the Batch service writes two log files to the compute node, 'fileuploadout.txt' and 'fileuploaderr.txt'. These log files are used to learn more about a specific failure. + public partial class OutputFile + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// A pattern indicating which file(s) to upload. Both relative and absolute paths are supported. Relative paths are relative to the Task working directory. The following wildcards are supported: * matches 0 or more characters (for example pattern abc* would match abc or abcdef), ** matches any directory, ? matches any single character, [abc] matches one character in the brackets, and [a-c] matches one character in the range. Brackets can include a negation to match any character not specified (for example [!abc] matches any character but a, b, or c). If a file name starts with "." it is ignored by default but may be matched by specifying it explicitly (for example *.gif will not match .a.gif, but .*.gif will). A simple example: **\*.txt matches any file that does not start in '.' and ends with .txt in the Task working directory or any subdirectory. If the filename contains a wildcard character it can be escaped using brackets (for example abc[*] would match a file named abc*).
Note that both \ and / are treated as directory separators on Windows, but only / is on Linux. Environment variables (%var% on Windows or $var on Linux) are expanded prior to the pattern being applied. + /// The destination for the output file(s). + /// Additional options for the upload operation, including under what conditions to perform the upload. + /// , or is null. + public OutputFile(string filePattern, OutputFileDestination destination, OutputFileUploadConfig uploadOptions) + { + Argument.AssertNotNull(filePattern, nameof(filePattern)); + Argument.AssertNotNull(destination, nameof(destination)); + Argument.AssertNotNull(uploadOptions, nameof(uploadOptions)); + + FilePattern = filePattern; + Destination = destination; + UploadOptions = uploadOptions; + } + + /// Initializes a new instance of . + /// A pattern indicating which file(s) to upload. Both relative and absolute paths are supported. Relative paths are relative to the Task working directory. The following wildcards are supported: * matches 0 or more characters (for example pattern abc* would match abc or abcdef), ** matches any directory, ? matches any single character, [abc] matches one character in the brackets, and [a-c] matches one character in the range. Brackets can include a negation to match any character not specified (for example [!abc] matches any character but a, b, or c). If a file name starts with "." it is ignored by default but may be matched by specifying it explicitly (for example *.gif will not match .a.gif, but .*.gif will). A simple example: **\*.txt matches any file that does not start in '.' and ends with .txt in the Task working directory or any subdirectory. If the filename contains a wildcard character it can be escaped using brackets (for example abc[*] would match a file named abc*). Note that both \ and / are treated as directory separators on Windows, but only / is on Linux. Environment variables (%var% on Windows or $var on Linux) are expanded prior to the pattern being applied. + /// The destination for the output file(s). + /// Additional options for the upload operation, including under what conditions to perform the upload. + /// Keeps track of any properties unknown to the library. + internal OutputFile(string filePattern, OutputFileDestination destination, OutputFileUploadConfig uploadOptions, IDictionary serializedAdditionalRawData) + { + FilePattern = filePattern; + Destination = destination; + UploadOptions = uploadOptions; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal OutputFile() + { + } + + /// A pattern indicating which file(s) to upload. Both relative and absolute paths are supported. Relative paths are relative to the Task working directory. The following wildcards are supported: * matches 0 or more characters (for example pattern abc* would match abc or abcdef), ** matches any directory, ? matches any single character, [abc] matches one character in the brackets, and [a-c] matches one character in the range. Brackets can include a negation to match any character not specified (for example [!abc] matches any character but a, b, or c). If a file name starts with "." it is ignored by default but may be matched by specifying it explicitly (for example *.gif will not match .a.gif, but .*.gif will). A simple example: **\*.txt matches any file that does not start in '.' and ends with .txt in the Task working directory or any subdirectory. 
If the filename contains a wildcard character it can be escaped using brackets (for example abc[*] would match a file named abc*). Note that both \ and / are treated as directory separators on Windows, but only / is on Linux. Environment variables (%var% on Windows or $var on Linux) are expanded prior to the pattern being applied. + public string FilePattern { get; set; } + /// The destination for the output file(s). + public OutputFileDestination Destination { get; set; } + /// Additional options for the upload operation, including under what conditions to perform the upload. + public OutputFileUploadConfig UploadOptions { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileBlobContainerDestination.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileBlobContainerDestination.Serialization.cs new file mode 100644 index 0000000000000..059dd2156abbd --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileBlobContainerDestination.Serialization.cs @@ -0,0 +1,186 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class OutputFileBlobContainerDestination : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(OutputFileBlobContainerDestination)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(Path)) + { + writer.WritePropertyName("path"u8); + writer.WriteStringValue(Path); + } + writer.WritePropertyName("containerUrl"u8); + writer.WriteStringValue(ContainerUrl); + if (Optional.IsDefined(IdentityReference)) + { + writer.WritePropertyName("identityReference"u8); + writer.WriteObjectValue(IdentityReference, options); + } + if (Optional.IsCollectionDefined(UploadHeaders)) + { + writer.WritePropertyName("uploadHeaders"u8); + writer.WriteStartArray(); + foreach (var item in UploadHeaders) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + OutputFileBlobContainerDestination IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(OutputFileBlobContainerDestination)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeOutputFileBlobContainerDestination(document.RootElement, options); + } + + internal static OutputFileBlobContainerDestination DeserializeOutputFileBlobContainerDestination(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string path = default; + string containerUrl = default; + BatchNodeIdentityReference identityReference = default; + IList uploadHeaders = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("path"u8)) + { + path = property.Value.GetString(); + continue; + } + if (property.NameEquals("containerUrl"u8)) + { + containerUrl = property.Value.GetString(); + continue; + } + if (property.NameEquals("identityReference"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + identityReference = BatchNodeIdentityReference.DeserializeBatchNodeIdentityReference(property.Value, options); + continue; + } + if (property.NameEquals("uploadHeaders"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(HttpHeader.DeserializeHttpHeader(item, options)); + } + uploadHeaders = array; + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new OutputFileBlobContainerDestination(path, containerUrl, identityReference, uploadHeaders ?? new ChangeTrackingList(), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(OutputFileBlobContainerDestination)} does not support writing '{options.Format}' format."); + } + } + + OutputFileBlobContainerDestination IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeOutputFileBlobContainerDestination(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(OutputFileBlobContainerDestination)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. 
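// A sketch for the OutputFileBlobContainerDestination model defined below; the
// container URL is a placeholder SAS URL and must grant write access unless a
// managed identity is supplied through IdentityReference instead.
using Azure.Compute.Batch;

internal static class ContainerDestinationExample
{
    internal static OutputFileBlobContainerDestination BuildDestination()
    {
        return new OutputFileBlobContainerDestination("https://myaccount.blob.core.windows.net/results?<sas-token>")
        {
            Path = "task-logs" // blob virtual directory; omit to upload to the container root
        };
    }
}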
+ internal static OutputFileBlobContainerDestination FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeOutputFileBlobContainerDestination(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileBlobContainerDestination.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileBlobContainerDestination.cs new file mode 100644 index 0000000000000..44d0f78eef822 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileBlobContainerDestination.cs @@ -0,0 +1,88 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Specifies a file upload destination within an Azure blob storage container. + public partial class OutputFileBlobContainerDestination + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The URL of the container within Azure Blob Storage to which to upload the file(s). If not using a managed identity, the URL must include a Shared Access Signature (SAS) granting write permissions to the container. + /// is null. + public OutputFileBlobContainerDestination(string containerUrl) + { + Argument.AssertNotNull(containerUrl, nameof(containerUrl)); + + ContainerUrl = containerUrl; + UploadHeaders = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// The destination blob or virtual directory within the Azure Storage container. If filePattern refers to a specific file (i.e. contains no wildcards), then path is the name of the blob to which to upload that file. If filePattern contains one or more wildcards (and therefore may match multiple files), then path is the name of the blob virtual directory (which is prepended to each blob name) to which to upload the file(s). If omitted, file(s) are uploaded to the root of the container with a blob name matching their file name. + /// The URL of the container within Azure Blob Storage to which to upload the file(s). If not using a managed identity, the URL must include a Shared Access Signature (SAS) granting write permissions to the container. + /// The reference to the user assigned identity to use to access Azure Blob Storage specified by containerUrl. The identity must have write access to the Azure Blob Storage container. + /// A list of name-value pairs for headers to be used in uploading output files. These headers will be specified when uploading files to Azure Storage. 
Official document on allowed headers when uploading blobs: https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#request-headers-all-blob-types. + /// Keeps track of any properties unknown to the library. + internal OutputFileBlobContainerDestination(string path, string containerUrl, BatchNodeIdentityReference identityReference, IList uploadHeaders, IDictionary serializedAdditionalRawData) + { + Path = path; + ContainerUrl = containerUrl; + IdentityReference = identityReference; + UploadHeaders = uploadHeaders; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal OutputFileBlobContainerDestination() + { + } + + /// The destination blob or virtual directory within the Azure Storage container. If filePattern refers to a specific file (i.e. contains no wildcards), then path is the name of the blob to which to upload that file. If filePattern contains one or more wildcards (and therefore may match multiple files), then path is the name of the blob virtual directory (which is prepended to each blob name) to which to upload the file(s). If omitted, file(s) are uploaded to the root of the container with a blob name matching their file name. + public string Path { get; set; } + /// The URL of the container within Azure Blob Storage to which to upload the file(s). If not using a managed identity, the URL must include a Shared Access Signature (SAS) granting write permissions to the container. + public string ContainerUrl { get; set; } + /// The reference to the user assigned identity to use to access Azure Blob Storage specified by containerUrl. The identity must have write access to the Azure Blob Storage container. + public BatchNodeIdentityReference IdentityReference { get; set; } + /// A list of name-value pairs for headers to be used in uploading output files. These headers will be specified when uploading files to Azure Storage. Official document on allowed headers when uploading blobs: https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#request-headers-all-blob-types. + public IList UploadHeaders { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileDestination.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileDestination.Serialization.cs new file mode 100644 index 0000000000000..6f34ab1cfc7ec --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileDestination.Serialization.cs @@ -0,0 +1,142 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class OutputFileDestination : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(OutputFileDestination)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(Container)) + { + writer.WritePropertyName("container"u8); + writer.WriteObjectValue(Container, options); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + OutputFileDestination IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(OutputFileDestination)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeOutputFileDestination(document.RootElement, options); + } + + internal static OutputFileDestination DeserializeOutputFileDestination(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + OutputFileBlobContainerDestination container = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("container"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + container = OutputFileBlobContainerDestination.DeserializeOutputFileBlobContainerDestination(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new OutputFileDestination(container, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(OutputFileDestination)} does not support writing '{options.Format}' format."); + } + } + + OutputFileDestination IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeOutputFileDestination(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(OutputFileDestination)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. 
+ internal static OutputFileDestination FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeOutputFileDestination(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileDestination.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileDestination.cs new file mode 100644 index 0000000000000..4967dff392581 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileDestination.cs @@ -0,0 +1,65 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// The destination to which a file should be uploaded. + public partial class OutputFileDestination + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public OutputFileDestination() + { + } + + /// Initializes a new instance of . + /// A location in Azure blob storage to which files are uploaded. + /// Keeps track of any properties unknown to the library. + internal OutputFileDestination(OutputFileBlobContainerDestination container, IDictionary serializedAdditionalRawData) + { + Container = container; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// A location in Azure blob storage to which files are uploaded. + public OutputFileBlobContainerDestination Container { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileUploadCondition.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileUploadCondition.cs new file mode 100644 index 0000000000000..848d4139bd3ac --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileUploadCondition.cs @@ -0,0 +1,54 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// OutputFileUploadCondition enums. + public readonly partial struct OutputFileUploadCondition : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public OutputFileUploadCondition(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string TaskSuccessValue = "tasksuccess"; + private const string TaskFailureValue = "taskfailure"; + private const string TaskCompletionValue = "taskcompletion"; + + /// Upload the file(s) only after the Task process exits with an exit code of 0. 
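+ // Extensible-enum pattern: the known values below are convenience properties, but any string
+ // round-trips via the implicit conversion, so values the service adds later do not break
+ // older client builds.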
+ public static OutputFileUploadCondition TaskSuccess { get; } = new OutputFileUploadCondition(TaskSuccessValue); + /// Upload the file(s) only after the Task process exits with a nonzero exit code. + public static OutputFileUploadCondition TaskFailure { get; } = new OutputFileUploadCondition(TaskFailureValue); + /// Upload the file(s) after the Task process exits, no matter what the exit code was. + public static OutputFileUploadCondition TaskCompletion { get; } = new OutputFileUploadCondition(TaskCompletionValue); + /// Determines if two values are the same. + public static bool operator ==(OutputFileUploadCondition left, OutputFileUploadCondition right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(OutputFileUploadCondition left, OutputFileUploadCondition right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator OutputFileUploadCondition(string value) => new OutputFileUploadCondition(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is OutputFileUploadCondition other && Equals(other); + /// + public bool Equals(OutputFileUploadCondition other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileUploadConfig.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileUploadConfig.Serialization.cs new file mode 100644 index 0000000000000..d5204fa3cb798 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileUploadConfig.Serialization.cs @@ -0,0 +1,135 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class OutputFileUploadConfig : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(OutputFileUploadConfig)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("uploadCondition"u8); + writer.WriteStringValue(UploadCondition.ToString()); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + OutputFileUploadConfig IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
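+ // (DeserializeOutputFileUploadConfig below stashes any JSON properties this library version
+ // does not recognize in _serializedAdditionalRawData, so they survive a read/modify/write
+ // round trip instead of being dropped.)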
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(OutputFileUploadConfig)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeOutputFileUploadConfig(document.RootElement, options); + } + + internal static OutputFileUploadConfig DeserializeOutputFileUploadConfig(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + OutputFileUploadCondition uploadCondition = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("uploadCondition"u8)) + { + uploadCondition = new OutputFileUploadCondition(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new OutputFileUploadConfig(uploadCondition, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(OutputFileUploadConfig)} does not support writing '{options.Format}' format."); + } + } + + OutputFileUploadConfig IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeOutputFileUploadConfig(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(OutputFileUploadConfig)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static OutputFileUploadConfig FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeOutputFileUploadConfig(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileUploadConfig.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileUploadConfig.cs new file mode 100644 index 0000000000000..449b7385086d9 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileUploadConfig.cs @@ -0,0 +1,75 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// + /// Options for an output file upload operation, including under what conditions + /// to perform the upload. 
+ /// + public partial class OutputFileUploadConfig + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The conditions under which the Task output file or set of files should be uploaded. The default is taskcompletion. + public OutputFileUploadConfig(OutputFileUploadCondition uploadCondition) + { + UploadCondition = uploadCondition; + } + + /// Initializes a new instance of . + /// The conditions under which the Task output file or set of files should be uploaded. The default is taskcompletion. + /// Keeps track of any properties unknown to the library. + internal OutputFileUploadConfig(OutputFileUploadCondition uploadCondition, IDictionary serializedAdditionalRawData) + { + UploadCondition = uploadCondition; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal OutputFileUploadConfig() + { + } + + /// The conditions under which the Task output file or set of files should be uploaded. The default is taskcompletion. + public OutputFileUploadCondition UploadCondition { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/PublicIpAddressConfiguration.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/PublicIpAddressConfiguration.Serialization.cs new file mode 100644 index 0000000000000..e98eca4d6d31a --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/PublicIpAddressConfiguration.Serialization.cs @@ -0,0 +1,167 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class PublicIpAddressConfiguration : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
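+ // (In the body below, Optional.IsDefined/IsCollectionDefined keep unset members out of the
+ // payload entirely, so the service can distinguish "not provided" from an explicit value.)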
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(PublicIpAddressConfiguration)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(IpAddressProvisioningType)) + { + writer.WritePropertyName("provision"u8); + writer.WriteStringValue(IpAddressProvisioningType.Value.ToString()); + } + if (Optional.IsCollectionDefined(IpAddressIds)) + { + writer.WritePropertyName("ipAddressIds"u8); + writer.WriteStartArray(); + foreach (var item in IpAddressIds) + { + writer.WriteStringValue(item); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + PublicIpAddressConfiguration IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(PublicIpAddressConfiguration)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializePublicIpAddressConfiguration(document.RootElement, options); + } + + internal static PublicIpAddressConfiguration DeserializePublicIpAddressConfiguration(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + IpAddressProvisioningType? provision = default; + IList ipAddressIds = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("provision"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + provision = new IpAddressProvisioningType(property.Value.GetString()); + continue; + } + if (property.NameEquals("ipAddressIds"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(item.GetString()); + } + ipAddressIds = array; + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new PublicIpAddressConfiguration(provision, ipAddressIds ?? new ChangeTrackingList(), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(PublicIpAddressConfiguration)} does not support writing '{options.Format}' format."); + } + } + + PublicIpAddressConfiguration IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializePublicIpAddressConfiguration(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(PublicIpAddressConfiguration)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static PublicIpAddressConfiguration FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializePublicIpAddressConfiguration(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/PublicIpAddressConfiguration.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/PublicIpAddressConfiguration.cs new file mode 100644 index 0000000000000..3af8d5206e919 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/PublicIpAddressConfiguration.cs @@ -0,0 +1,70 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// The public IP Address configuration of the networking configuration of a Pool. + public partial class PublicIpAddressConfiguration + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public PublicIpAddressConfiguration() + { + IpAddressIds = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// The provisioning type for Public IP Addresses for the Pool. The default value is BatchManaged. + /// The list of public IPs which the Batch service will use when provisioning Compute Nodes. The number of IPs specified here limits the maximum size of the Pool - 100 dedicated nodes or 100 Spot/Low-priority nodes can be allocated for each public IP. For example, a pool needing 250 dedicated VMs would need at least 3 public IPs specified. Each element of this collection is of the form: /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. + /// Keeps track of any properties unknown to the library. + internal PublicIpAddressConfiguration(IpAddressProvisioningType? 
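+ // Sizing note: each public IP covers at most 100 dedicated (or 100 Spot/Low-priority) nodes,
+ // so a pool of 250 dedicated VMs needs ceil(250 / 100) = 3 entries in ipAddressIds.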
ipAddressProvisioningType, IList ipAddressIds, IDictionary serializedAdditionalRawData) + { + IpAddressProvisioningType = ipAddressProvisioningType; + IpAddressIds = ipAddressIds; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The provisioning type for Public IP Addresses for the Pool. The default value is BatchManaged. + public IpAddressProvisioningType? IpAddressProvisioningType { get; set; } + /// The list of public IPs which the Batch service will use when provisioning Compute Nodes. The number of IPs specified here limits the maximum size of the Pool - 100 dedicated nodes or 100 Spot/Low-priority nodes can be allocated for each public IP. For example, a pool needing 250 dedicated VMs would need at least 3 public IPs specified. Each element of this collection is of the form: /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. + public IList IpAddressIds { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/RecentBatchJob.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/RecentBatchJob.Serialization.cs new file mode 100644 index 0000000000000..2d448fb013eeb --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/RecentBatchJob.Serialization.cs @@ -0,0 +1,149 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class RecentBatchJob : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RecentBatchJob)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(Id)) + { + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + } + if (Optional.IsDefined(Url)) + { + writer.WritePropertyName("url"u8); + writer.WriteStringValue(Url); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + RecentBatchJob IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RecentBatchJob)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeRecentBatchJob(document.RootElement, options); + } + + internal static RecentBatchJob DeserializeRecentBatchJob(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + string url = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (property.NameEquals("url"u8)) + { + url = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new RecentBatchJob(id, url, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(RecentBatchJob)} does not support writing '{options.Format}' format."); + } + } + + RecentBatchJob IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeRecentBatchJob(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(RecentBatchJob)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static RecentBatchJob FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeRecentBatchJob(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/RecentBatchJob.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/RecentBatchJob.cs new file mode 100644 index 0000000000000..b3de5d45026bb --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/RecentBatchJob.cs @@ -0,0 +1,69 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Information about the most recent Job to run under the Job Schedule. + public partial class RecentBatchJob + { + /// + /// Keeps track of any properties unknown to the library. 
+ /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public RecentBatchJob() + { + } + + /// Initializes a new instance of . + /// The ID of the Job. + /// The URL of the Job. + /// Keeps track of any properties unknown to the library. + internal RecentBatchJob(string id, string url, IDictionary serializedAdditionalRawData) + { + Id = id; + Url = url; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The ID of the Job. + public string Id { get; set; } + /// The URL of the Job. + public string Url { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ResizeError.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ResizeError.Serialization.cs new file mode 100644 index 0000000000000..c65fd7a6c0c36 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ResizeError.Serialization.cs @@ -0,0 +1,174 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class ResizeError : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ResizeError)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(Code)) + { + writer.WritePropertyName("code"u8); + writer.WriteStringValue(Code); + } + if (Optional.IsDefined(Message)) + { + writer.WritePropertyName("message"u8); + writer.WriteStringValue(Message); + } + if (Optional.IsCollectionDefined(Values)) + { + writer.WritePropertyName("values"u8); + writer.WriteStartArray(); + foreach (var item in Values) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ResizeError IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
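+ // (JsonDocument.ParseValue below consumes exactly one JSON value from the reader, which is
+ // what lets nested models such as the NameValuePair error details deserialize independently.)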
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ResizeError)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeResizeError(document.RootElement, options); + } + + internal static ResizeError DeserializeResizeError(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string code = default; + string message = default; + IReadOnlyList values = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("code"u8)) + { + code = property.Value.GetString(); + continue; + } + if (property.NameEquals("message"u8)) + { + message = property.Value.GetString(); + continue; + } + if (property.NameEquals("values"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(NameValuePair.DeserializeNameValuePair(item, options)); + } + values = array; + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new ResizeError(code, message, values ?? new ChangeTrackingList(), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ResizeError)} does not support writing '{options.Format}' format."); + } + } + + ResizeError IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeResizeError(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ResizeError)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static ResizeError FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeResizeError(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ResizeError.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ResizeError.cs new file mode 100644 index 0000000000000..f01ef629223dd --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ResizeError.cs @@ -0,0 +1,74 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// An error that occurred when resizing a Pool. + public partial class ResizeError + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + internal ResizeError() + { + Values = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// An identifier for the Pool resize error. Codes are invariant and are intended to be consumed programmatically. + /// A message describing the Pool resize error, intended to be suitable for display in a user interface. + /// A list of additional error details related to the Pool resize error. + /// Keeps track of any properties unknown to the library. + internal ResizeError(string code, string message, IReadOnlyList values, IDictionary serializedAdditionalRawData) + { + Code = code; + Message = message; + Values = values; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// An identifier for the Pool resize error. Codes are invariant and are intended to be consumed programmatically. + public string Code { get; } + /// A message describing the Pool resize error, intended to be suitable for display in a user interface. + public string Message { get; } + /// A list of additional error details related to the Pool resize error. + public IReadOnlyList Values { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ResourceFile.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ResourceFile.Serialization.cs new file mode 100644 index 0000000000000..324491f6f9266 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ResourceFile.Serialization.cs @@ -0,0 +1,216 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class ResourceFile : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ResourceFile)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(AutoStorageContainerName)) + { + writer.WritePropertyName("autoStorageContainerName"u8); + writer.WriteStringValue(AutoStorageContainerName); + } + if (Optional.IsDefined(StorageContainerUrl)) + { + writer.WritePropertyName("storageContainerUrl"u8); + writer.WriteStringValue(StorageContainerUrl); + } + if (Optional.IsDefined(HttpUrl)) + { + writer.WritePropertyName("httpUrl"u8); + writer.WriteStringValue(HttpUrl); + } + if (Optional.IsDefined(BlobPrefix)) + { + writer.WritePropertyName("blobPrefix"u8); + writer.WriteStringValue(BlobPrefix); + } + if (Optional.IsDefined(FilePath)) + { + writer.WritePropertyName("filePath"u8); + writer.WriteStringValue(FilePath); + } + if (Optional.IsDefined(FileMode)) + { + writer.WritePropertyName("fileMode"u8); + writer.WriteStringValue(FileMode); + } + if (Optional.IsDefined(IdentityReference)) + { + writer.WritePropertyName("identityReference"u8); + writer.WriteObjectValue(IdentityReference, options); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ResourceFile IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ResourceFile)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeResourceFile(document.RootElement, options); + } + + internal static ResourceFile DeserializeResourceFile(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string autoStorageContainerName = default; + string storageContainerUrl = default; + string httpUrl = default; + string blobPrefix = default; + string filePath = default; + string fileMode = default; + BatchNodeIdentityReference identityReference = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("autoStorageContainerName"u8)) + { + autoStorageContainerName = property.Value.GetString(); + continue; + } + if (property.NameEquals("storageContainerUrl"u8)) + { + storageContainerUrl = property.Value.GetString(); + continue; + } + if (property.NameEquals("httpUrl"u8)) + { + httpUrl = property.Value.GetString(); + continue; + } + if (property.NameEquals("blobPrefix"u8)) + { + blobPrefix = property.Value.GetString(); + continue; + } + if (property.NameEquals("filePath"u8)) + { + filePath = property.Value.GetString(); + continue; + } + if (property.NameEquals("fileMode"u8)) + { + fileMode = property.Value.GetString(); + continue; + } + if (property.NameEquals("identityReference"u8)) + { + if (property.Value.ValueKind 
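+ // an explicit JSON null is treated the same as an absent property: the field stays unset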
== JsonValueKind.Null) + { + continue; + } + identityReference = BatchNodeIdentityReference.DeserializeBatchNodeIdentityReference(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new ResourceFile( + autoStorageContainerName, + storageContainerUrl, + httpUrl, + blobPrefix, + filePath, + fileMode, + identityReference, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ResourceFile)} does not support writing '{options.Format}' format."); + } + } + + ResourceFile IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeResourceFile(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ResourceFile)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static ResourceFile FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeResourceFile(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ResourceFile.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ResourceFile.cs new file mode 100644 index 0000000000000..b404259100420 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ResourceFile.cs @@ -0,0 +1,89 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// A single file or multiple files to be downloaded to a Compute Node. + public partial class ResourceFile + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public ResourceFile() + { + } + + /// Initializes a new instance of . 
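+ // Illustrative usage (one of the three mutually exclusive sources must be set; HttpUrl also
+ // requires FilePath to name the downloaded file; the URL here is a placeholder):
+ //   var file = new ResourceFile { HttpUrl = "https://example.com/app.tar.gz", FilePath = "app.tar.gz" };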
+ /// The storage container name in the auto storage Account. The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. + /// The URL of the blob container within Azure Blob Storage. The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. This URL must be readable and listable from compute nodes. There are three ways to get such a URL for a container in Azure storage: include a Shared Access Signature (SAS) granting read and list permissions on the container, use a managed identity with read and list permissions, or set the ACL for the container to allow public access. + /// The URL of the file to download. The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. If the URL points to Azure Blob Storage, it must be readable from compute nodes. There are three ways to get such a URL for a blob in Azure storage: include a Shared Access Signature (SAS) granting read permissions on the blob, use a managed identity with read permission, or set the ACL for the blob or its container to allow public access. + /// The blob prefix to use when downloading blobs from an Azure Storage container. Only the blobs whose names begin with the specified prefix will be downloaded. The property is valid only when autoStorageContainerName or storageContainerUrl is used. This prefix can be a partial filename or a subdirectory. If a prefix is not specified, all the files in the container will be downloaded. + /// The location on the Compute Node to which to download the file(s), relative to the Task's working directory. If the httpUrl property is specified, the filePath is required and describes the path which the file will be downloaded to, including the filename. Otherwise, if the autoStorageContainerName or storageContainerUrl property is specified, filePath is optional and is the directory to download the files to. In the case where filePath is used as a directory, any directory structure already associated with the input data will be retained in full and appended to the specified filePath directory. The specified relative path cannot break out of the Task's working directory (for example by using '..'). + /// The file permission mode attribute in octal format. This property applies only to files being downloaded to Linux Compute Nodes. It will be ignored if it is specified for a resourceFile which will be downloaded to a Windows Compute Node. If this property is not specified for a Linux Compute Node, then a default value of 0770 is applied to the file. + /// The reference to the user assigned identity to use to access Azure Blob Storage specified by storageContainerUrl or httpUrl. + /// Keeps track of any properties unknown to the library. + internal ResourceFile(string autoStorageContainerName, string storageContainerUrl, string httpUrl, string blobPrefix, string filePath, string fileMode, BatchNodeIdentityReference identityReference, IDictionary serializedAdditionalRawData) + { + AutoStorageContainerName = autoStorageContainerName; + StorageContainerUrl = storageContainerUrl; + HttpUrl = httpUrl; + BlobPrefix = blobPrefix; + FilePath = filePath; + FileMode = fileMode; + IdentityReference = identityReference; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The storage container name in the auto storage Account. 
The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. + public string AutoStorageContainerName { get; set; } + /// The URL of the blob container within Azure Blob Storage. The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. This URL must be readable and listable from compute nodes. There are three ways to get such a URL for a container in Azure storage: include a Shared Access Signature (SAS) granting read and list permissions on the container, use a managed identity with read and list permissions, or set the ACL for the container to allow public access. + public string StorageContainerUrl { get; set; } + /// The URL of the file to download. The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. If the URL points to Azure Blob Storage, it must be readable from compute nodes. There are three ways to get such a URL for a blob in Azure storage: include a Shared Access Signature (SAS) granting read permissions on the blob, use a managed identity with read permission, or set the ACL for the blob or its container to allow public access. + public string HttpUrl { get; set; } + /// The blob prefix to use when downloading blobs from an Azure Storage container. Only the blobs whose names begin with the specified prefix will be downloaded. The property is valid only when autoStorageContainerName or storageContainerUrl is used. This prefix can be a partial filename or a subdirectory. If a prefix is not specified, all the files in the container will be downloaded. + public string BlobPrefix { get; set; } + /// The location on the Compute Node to which to download the file(s), relative to the Task's working directory. If the httpUrl property is specified, the filePath is required and describes the path which the file will be downloaded to, including the filename. Otherwise, if the autoStorageContainerName or storageContainerUrl property is specified, filePath is optional and is the directory to download the files to. In the case where filePath is used as a directory, any directory structure already associated with the input data will be retained in full and appended to the specified filePath directory. The specified relative path cannot break out of the Task's working directory (for example by using '..'). + public string FilePath { get; set; } + /// The file permission mode attribute in octal format. This property applies only to files being downloaded to Linux Compute Nodes. It will be ignored if it is specified for a resourceFile which will be downloaded to a Windows Compute Node. If this property is not specified for a Linux Compute Node, then a default value of 0770 is applied to the file. + public string FileMode { get; set; } + /// The reference to the user assigned identity to use to access Azure Blob Storage specified by storageContainerUrl or httpUrl. + public BatchNodeIdentityReference IdentityReference { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/RollingUpgradePolicy.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/RollingUpgradePolicy.Serialization.cs new file mode 100644 index 0000000000000..09b39011e43ba --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/RollingUpgradePolicy.Serialization.cs @@ -0,0 +1,240 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class RollingUpgradePolicy : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RollingUpgradePolicy)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(EnableCrossZoneUpgrade)) + { + writer.WritePropertyName("enableCrossZoneUpgrade"u8); + writer.WriteBooleanValue(EnableCrossZoneUpgrade.Value); + } + if (Optional.IsDefined(MaxBatchInstancePercent)) + { + writer.WritePropertyName("maxBatchInstancePercent"u8); + writer.WriteNumberValue(MaxBatchInstancePercent.Value); + } + if (Optional.IsDefined(MaxUnhealthyInstancePercent)) + { + writer.WritePropertyName("maxUnhealthyInstancePercent"u8); + writer.WriteNumberValue(MaxUnhealthyInstancePercent.Value); + } + if (Optional.IsDefined(MaxUnhealthyUpgradedInstancePercent)) + { + writer.WritePropertyName("maxUnhealthyUpgradedInstancePercent"u8); + writer.WriteNumberValue(MaxUnhealthyUpgradedInstancePercent.Value); + } + if (Optional.IsDefined(PauseTimeBetweenBatches)) + { + writer.WritePropertyName("pauseTimeBetweenBatches"u8); + writer.WriteStringValue(PauseTimeBetweenBatches.Value, "P"); + } + if (Optional.IsDefined(PrioritizeUnhealthyInstances)) + { + writer.WritePropertyName("prioritizeUnhealthyInstances"u8); + writer.WriteBooleanValue(PrioritizeUnhealthyInstances.Value); + } + if (Optional.IsDefined(RollbackFailedInstancesOnPolicyBreach)) + { + writer.WritePropertyName("rollbackFailedInstancesOnPolicyBreach"u8); + writer.WriteBooleanValue(RollbackFailedInstancesOnPolicyBreach.Value); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + RollingUpgradePolicy IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(RollingUpgradePolicy)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeRollingUpgradePolicy(document.RootElement, options); + } + + internal static RollingUpgradePolicy DeserializeRollingUpgradePolicy(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + bool? enableCrossZoneUpgrade = default; + int? maxBatchInstancePercent = default; + int? maxUnhealthyInstancePercent = default; + int? maxUnhealthyUpgradedInstancePercent = default; + TimeSpan? 
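+ // pauseTimeBetweenBatches travels as an ISO 8601 duration string (e.g. "PT5M"), hence the
+ // "P" format specifier used with WriteStringValue and GetTimeSpan in this file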
pauseTimeBetweenBatches = default; + bool? prioritizeUnhealthyInstances = default; + bool? rollbackFailedInstancesOnPolicyBreach = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("enableCrossZoneUpgrade"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + enableCrossZoneUpgrade = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("maxBatchInstancePercent"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + maxBatchInstancePercent = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("maxUnhealthyInstancePercent"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + maxUnhealthyInstancePercent = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("maxUnhealthyUpgradedInstancePercent"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + maxUnhealthyUpgradedInstancePercent = property.Value.GetInt32(); + continue; + } + if (property.NameEquals("pauseTimeBetweenBatches"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + pauseTimeBetweenBatches = property.Value.GetTimeSpan("P"); + continue; + } + if (property.NameEquals("prioritizeUnhealthyInstances"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + prioritizeUnhealthyInstances = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("rollbackFailedInstancesOnPolicyBreach"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + rollbackFailedInstancesOnPolicyBreach = property.Value.GetBoolean(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new RollingUpgradePolicy( + enableCrossZoneUpgrade, + maxBatchInstancePercent, + maxUnhealthyInstancePercent, + maxUnhealthyUpgradedInstancePercent, + pauseTimeBetweenBatches, + prioritizeUnhealthyInstances, + rollbackFailedInstancesOnPolicyBreach, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(RollingUpgradePolicy)} does not support writing '{options.Format}' format."); + } + } + + RollingUpgradePolicy IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeRollingUpgradePolicy(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(RollingUpgradePolicy)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. 
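+ // Illustrative usage: a conservative policy upgrading 20% of instances per batch with a
+ // five-minute pause between batches (all properties on this model are optional and settable):
+ //   var policy = new RollingUpgradePolicy { MaxBatchInstancePercent = 20, PauseTimeBetweenBatches = TimeSpan.FromMinutes(5) };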
+ internal static RollingUpgradePolicy FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeRollingUpgradePolicy(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/RollingUpgradePolicy.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/RollingUpgradePolicy.cs new file mode 100644 index 0000000000000..00654d33a88fa --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/RollingUpgradePolicy.cs @@ -0,0 +1,89 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// The configuration parameters used while performing a rolling upgrade. + public partial class RollingUpgradePolicy + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public RollingUpgradePolicy() + { + } + + /// Initializes a new instance of . + /// Allow VMSS to ignore AZ boundaries when constructing upgrade batches. Take into consideration the Update Domain and maxBatchInstancePercent to determine the batch size. This field can be set to true or false only when using NodePlacementConfiguration as Zonal. + /// The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. As this is a maximum, unhealthy instances in previous or future batches can cause the percentage of instances in a batch to decrease to ensure higher reliability. The value of this field should be between 5 and 100, inclusive. If both maxBatchInstancePercent and maxUnhealthyInstancePercent are assigned a value, the value of maxBatchInstancePercent should not be more than maxUnhealthyInstancePercent. + /// The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy, either as a result of being upgraded, or by being found in an unhealthy state by the virtual machine health checks before the rolling upgrade aborts. This constraint will be checked prior to starting any batch. The value of this field should be between 5 and 100, inclusive. If both maxBatchInstancePercent and maxUnhealthyInstancePercent are assigned a value, the value of maxBatchInstancePercent should not be more than maxUnhealthyInstancePercent. + /// The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. This check will happen after each batch is upgraded. If this percentage is ever exceeded, the rolling update aborts. The value of this field should be between 0 and 100, inclusive. + /// The wait time between completing the update for all virtual machines in one batch and starting the next batch. The time duration should be specified in ISO 8601 format. + /// Upgrade all unhealthy instances in a scale set before any healthy instances. + /// Rollback failed instances to previous model if the Rolling Upgrade policy is violated. + /// Keeps track of any properties unknown to the library. + internal RollingUpgradePolicy(bool? enableCrossZoneUpgrade, int? maxBatchInstancePercent, int? maxUnhealthyInstancePercent, int? maxUnhealthyUpgradedInstancePercent, TimeSpan? pauseTimeBetweenBatches, bool? prioritizeUnhealthyInstances, bool? rollbackFailedInstancesOnPolicyBreach, IDictionary serializedAdditionalRawData) + { + EnableCrossZoneUpgrade = enableCrossZoneUpgrade; + MaxBatchInstancePercent = maxBatchInstancePercent; + MaxUnhealthyInstancePercent = maxUnhealthyInstancePercent; + MaxUnhealthyUpgradedInstancePercent = maxUnhealthyUpgradedInstancePercent; + PauseTimeBetweenBatches = pauseTimeBetweenBatches; + PrioritizeUnhealthyInstances = prioritizeUnhealthyInstances; + RollbackFailedInstancesOnPolicyBreach = rollbackFailedInstancesOnPolicyBreach; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Allow VMSS to ignore AZ boundaries when constructing upgrade batches. Take into consideration the Update Domain and maxBatchInstancePercent to determine the batch size. This field can be set to true or false only when using NodePlacementConfiguration as Zonal. + public bool? EnableCrossZoneUpgrade { get; set; } + /// The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. As this is a maximum, unhealthy instances in previous or future batches can cause the percentage of instances in a batch to decrease to ensure higher reliability. The value of this field should be between 5 and 100, inclusive. If both maxBatchInstancePercent and maxUnhealthyInstancePercent are assigned a value, the value of maxBatchInstancePercent should not be more than maxUnhealthyInstancePercent. + public int? MaxBatchInstancePercent { get; set; } + /// The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy, either as a result of being upgraded, or by being found in an unhealthy state by the virtual machine health checks before the rolling upgrade aborts. This constraint will be checked prior to starting any batch. The value of this field should be between 5 and 100, inclusive. If both maxBatchInstancePercent and maxUnhealthyInstancePercent are assigned a value, the value of maxBatchInstancePercent should not be more than maxUnhealthyInstancePercent. + public int? MaxUnhealthyInstancePercent { get; set; } + /// The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. This check will happen after each batch is upgraded. If this percentage is ever exceeded, the rolling update aborts. The value of this field should be between 0 and 100, inclusive. + public int? MaxUnhealthyUpgradedInstancePercent { get; set; } + /// The wait time between completing the update for all virtual machines in one batch and starting the next batch. The time duration should be specified in ISO 8601 format. + public TimeSpan? 
PauseTimeBetweenBatches { get; set; } + /// Upgrade all unhealthy instances in a scale set before any healthy instances. + public bool? PrioritizeUnhealthyInstances { get; set; } + /// Rollback failed instances to previous model if the Rolling Upgrade policy is violated. + public bool? RollbackFailedInstancesOnPolicyBreach { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/SchedulingState.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/SchedulingState.cs new file mode 100644 index 0000000000000..97792f09283ac --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/SchedulingState.cs @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// SchedulingState enums. + public readonly partial struct SchedulingState : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public SchedulingState(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string EnabledValue = "enabled"; + private const string DisabledValue = "disabled"; + + /// Tasks can be scheduled on the Compute Node. + public static SchedulingState Enabled { get; } = new SchedulingState(EnabledValue); + /// No new Tasks will be scheduled on the Compute Node. Tasks already running on the Compute Node may still run to completion. All Compute Nodes start with scheduling enabled. + public static SchedulingState Disabled { get; } = new SchedulingState(DisabledValue); + /// Determines if two values are the same. + public static bool operator ==(SchedulingState left, SchedulingState right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(SchedulingState left, SchedulingState right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator SchedulingState(string value) => new SchedulingState(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is SchedulingState other && Equals(other); + /// + public bool Equals(SchedulingState other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityProfile.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityProfile.Serialization.cs new file mode 100644 index 0000000000000..14ea5a55a6077 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityProfile.Serialization.cs @@ -0,0 +1,151 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class SecurityProfile : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(SecurityProfile)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("encryptionAtHost"u8); + writer.WriteBooleanValue(EncryptionAtHost); + writer.WritePropertyName("securityType"u8); + writer.WriteStringValue(SecurityType.ToString()); + writer.WritePropertyName("uefiSettings"u8); + writer.WriteObjectValue(UefiSettings, options); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + SecurityProfile IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(SecurityProfile)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeSecurityProfile(document.RootElement, options); + } + + internal static SecurityProfile DeserializeSecurityProfile(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + bool encryptionAtHost = default; + SecurityTypes securityType = default; + UefiSettings uefiSettings = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("encryptionAtHost"u8)) + { + encryptionAtHost = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("securityType"u8)) + { + securityType = new SecurityTypes(property.Value.GetString()); + continue; + } + if (property.NameEquals("uefiSettings"u8)) + { + uefiSettings = UefiSettings.DeserializeUefiSettings(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new SecurityProfile(encryptionAtHost, securityType, uefiSettings, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(SecurityProfile)} does not support writing '{options.Format}' format."); + } + } + + SecurityProfile IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeSecurityProfile(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(SecurityProfile)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static SecurityProfile FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeSecurityProfile(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityProfile.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityProfile.cs new file mode 100644 index 0000000000000..4f81495d150a0 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityProfile.cs @@ -0,0 +1,87 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Specifies the security profile settings for the virtual machine or virtual machine scale set. + public partial class SecurityProfile + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// This property can be used by the user in the request to enable or disable the Host Encryption for the virtual machine or virtual machine scale set. This will enable the encryption for all the disks including Resource/Temp disk at host itself. + /// Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings. + /// Specifies the security settings like secure boot and vTPM used while creating the virtual machine. + /// is null. + public SecurityProfile(bool encryptionAtHost, SecurityTypes securityType, UefiSettings uefiSettings) + { + Argument.AssertNotNull(uefiSettings, nameof(uefiSettings)); + + EncryptionAtHost = encryptionAtHost; + SecurityType = securityType; + UefiSettings = uefiSettings; + } + + /// Initializes a new instance of . + /// This property can be used by the user in the request to enable or disable the Host Encryption for the virtual machine or virtual machine scale set. This will enable the encryption for all the disks including Resource/Temp disk at host itself. + /// Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings. + /// Specifies the security settings like secure boot and vTPM used while creating the virtual machine. + /// Keeps track of any properties unknown to the library. + internal SecurityProfile(bool encryptionAtHost, SecurityTypes securityType, UefiSettings uefiSettings, IDictionary serializedAdditionalRawData) + { + EncryptionAtHost = encryptionAtHost; + SecurityType = securityType; + UefiSettings = uefiSettings; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal SecurityProfile() + { + } + + /// This property can be used by the user in the request to enable or disable the Host Encryption for the virtual machine or virtual machine scale set. This will enable the encryption for all the disks including Resource/Temp disk at host itself. + public bool EncryptionAtHost { get; set; } + /// Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings. + public SecurityTypes SecurityType { get; set; } + /// Specifies the security settings like secure boot and vTPM used while creating the virtual machine. + public UefiSettings UefiSettings { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityTypes.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityTypes.cs new file mode 100644 index 0000000000000..aabb69661ef5a --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityTypes.cs @@ -0,0 +1,48 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings. + public readonly partial struct SecurityTypes : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public SecurityTypes(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string TrustedLaunchValue = "trustedLaunch"; + + /// Trusted launch protects against advanced and persistent attack techniques. + public static SecurityTypes TrustedLaunch { get; } = new SecurityTypes(TrustedLaunchValue); + /// Determines if two values are the same. + public static bool operator ==(SecurityTypes left, SecurityTypes right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(SecurityTypes left, SecurityTypes right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator SecurityTypes(string value) => new SecurityTypes(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is SecurityTypes other && Equals(other); + /// + public bool Equals(SecurityTypes other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 
0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ServiceArtifactReference.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ServiceArtifactReference.Serialization.cs new file mode 100644 index 0000000000000..be9f32cd663bd --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ServiceArtifactReference.Serialization.cs @@ -0,0 +1,135 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class ServiceArtifactReference : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ServiceArtifactReference)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("id"u8); + writer.WriteStringValue(Id); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + ServiceArtifactReference IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(ServiceArtifactReference)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeServiceArtifactReference(document.RootElement, options); + } + + internal static ServiceArtifactReference DeserializeServiceArtifactReference(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string id = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("id"u8)) + { + id = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new ServiceArtifactReference(id, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(ServiceArtifactReference)} does not support writing '{options.Format}' format."); + } + } + + ServiceArtifactReference IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeServiceArtifactReference(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(ServiceArtifactReference)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static ServiceArtifactReference FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeServiceArtifactReference(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ServiceArtifactReference.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ServiceArtifactReference.cs new file mode 100644 index 0000000000000..5f7e8a17afa6d --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ServiceArtifactReference.cs @@ -0,0 +1,78 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// + /// Specifies the service artifact reference id used to set the same image version + /// for all virtual machines in the scale set when using 'latest' image version. + /// + public partial class ServiceArtifactReference + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The service artifact reference id in the form of /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. + /// is null. + public ServiceArtifactReference(string id) + { + Argument.AssertNotNull(id, nameof(id)); + + Id = id; + } + + /// Initializes a new instance of . + /// The service artifact reference id in the form of /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. + /// Keeps track of any properties unknown to the library. + internal ServiceArtifactReference(string id, IDictionary serializedAdditionalRawData) + { + Id = id; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal ServiceArtifactReference() + { + } + + /// The service artifact reference id in the form of /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. + public string Id { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/StatusLevelTypes.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/StatusLevelTypes.cs new file mode 100644 index 0000000000000..2ffe6c6ff1ab6 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/StatusLevelTypes.cs @@ -0,0 +1,54 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// Level code. + public readonly partial struct StatusLevelTypes : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public StatusLevelTypes(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string ErrorValue = "Error"; + private const string InfoValue = "Info"; + private const string WarningValue = "Warning"; + + /// Error. + public static StatusLevelTypes Error { get; } = new StatusLevelTypes(ErrorValue); + /// Info. + public static StatusLevelTypes Info { get; } = new StatusLevelTypes(InfoValue); + /// Warning. + public static StatusLevelTypes Warning { get; } = new StatusLevelTypes(WarningValue); + /// Determines if two values are the same. + public static bool operator ==(StatusLevelTypes left, StatusLevelTypes right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(StatusLevelTypes left, StatusLevelTypes right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator StatusLevelTypes(string value) => new StatusLevelTypes(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is StatusLevelTypes other && Equals(other); + /// + public bool Equals(StatusLevelTypes other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/StorageAccountType.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/StorageAccountType.cs new file mode 100644 index 0000000000000..150c5b8a946ab --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/StorageAccountType.cs @@ -0,0 +1,54 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
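// Reviewer note (illustrative only, not part of the generated file): this file, like SchedulingState and StatusLevelTypes above, follows the Azure SDK "extensible enum" pattern: a readonly struct wrapping the raw service string, with known values exposed as static properties, an implicit conversion from string, and case-insensitive equality, so values the service adds later still round-trip. A minimal usage sketch:
//
//     StorageAccountType fromService = "Premium_LRS";               // implicit conversion from string
//     bool premium = fromService == StorageAccountType.PremiumLRS;  // true: equality is case-insensitive
//     var future = new StorageAccountType("ultrassd_zrs");          // hypothetical future value, still representable
//     string raw = future.ToString();                               // "ultrassd_zrs"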
+ +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// StorageAccountType enums. + public readonly partial struct StorageAccountType : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public StorageAccountType(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string StandardLRSValue = "standard_lrs"; + private const string PremiumLRSValue = "premium_lrs"; + private const string StandardSSDLRSValue = "standardssd_lrs"; + + /// The data disk should use standard locally redundant storage. + public static StorageAccountType StandardLRS { get; } = new StorageAccountType(StandardLRSValue); + /// The data disk should use premium locally redundant storage. + public static StorageAccountType PremiumLRS { get; } = new StorageAccountType(PremiumLRSValue); + /// The data disk / OS disk should use standard SSD locally redundant storage. + public static StorageAccountType StandardSSDLRS { get; } = new StorageAccountType(StandardSSDLRSValue); + /// Determines if two values are the same. + public static bool operator ==(StorageAccountType left, StorageAccountType right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(StorageAccountType left, StorageAccountType right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator StorageAccountType(string value) => new StorageAccountType(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is StorageAccountType other && Equals(other); + /// + public bool Equals(StorageAccountType other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/UefiSettings.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/UefiSettings.Serialization.cs new file mode 100644 index 0000000000000..f8c1ab10a99d6 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/UefiSettings.Serialization.cs @@ -0,0 +1,157 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class UefiSettings : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(UefiSettings)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(SecureBootEnabled)) + { + writer.WritePropertyName("secureBootEnabled"u8); + writer.WriteBooleanValue(SecureBootEnabled.Value); + } + if (Optional.IsDefined(VTpmEnabled)) + { + writer.WritePropertyName("vTpmEnabled"u8); + writer.WriteBooleanValue(VTpmEnabled.Value); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + UefiSettings IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(UefiSettings)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeUefiSettings(document.RootElement, options); + } + + internal static UefiSettings DeserializeUefiSettings(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + bool? secureBootEnabled = default; + bool? vTpmEnabled = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("secureBootEnabled"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + secureBootEnabled = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("vTpmEnabled"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + vTpmEnabled = property.Value.GetBoolean(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new UefiSettings(secureBootEnabled, vTpmEnabled, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(UefiSettings)} does not support writing '{options.Format}' format."); + } + } + + UefiSettings IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeUefiSettings(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(UefiSettings)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static UefiSettings FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeUefiSettings(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/UefiSettings.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/UefiSettings.cs new file mode 100644 index 0000000000000..bc8c7c1a7f490 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/UefiSettings.cs @@ -0,0 +1,69 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Specifies the security settings like secure boot and vTPM used while creating the virtual machine. + public partial class UefiSettings + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public UefiSettings() + { + } + + /// Initializes a new instance of . + /// Specifies whether secure boot should be enabled on the virtual machine. + /// Specifies whether vTPM should be enabled on the virtual machine. + /// Keeps track of any properties unknown to the library. + internal UefiSettings(bool? secureBootEnabled, bool? vTpmEnabled, IDictionary serializedAdditionalRawData) + { + SecureBootEnabled = secureBootEnabled; + VTpmEnabled = vTpmEnabled; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Specifies whether secure boot should be enabled on the virtual machine. + public bool? SecureBootEnabled { get; set; } + /// Specifies whether vTPM should be enabled on the virtual machine. + public bool? VTpmEnabled { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/UpgradeMode.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/UpgradeMode.cs new file mode 100644 index 0000000000000..ba50197fc2545 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/UpgradeMode.cs @@ -0,0 +1,54 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. 
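// Reviewer note (illustrative only, not part of the generated file): the SecurityProfile, SecurityTypes and UefiSettings models added above compose as follows when a caller opts a pool's virtual machines into Trusted Launch; all names below come from the generated models in this PR. A minimal sketch:
//
//     var profile = new SecurityProfile(
//         encryptionAtHost: true,
//         securityType: SecurityTypes.TrustedLaunch,
//         uefiSettings: new UefiSettings { SecureBootEnabled = true, VTpmEnabled = true });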
+// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// UpgradeMode enums. + public readonly partial struct UpgradeMode : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public UpgradeMode(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string AutomaticValue = "automatic"; + private const string ManualValue = "manual"; + private const string RollingValue = "rolling"; + + /// All virtual machines in the scale set are automatically updated at the same time. + public static UpgradeMode Automatic { get; } = new UpgradeMode(AutomaticValue); + /// You control the application of updates to virtual machines in the scale set. You do this by using the manualUpgrade action. + public static UpgradeMode Manual { get; } = new UpgradeMode(ManualValue); + /// The existing instances in a scale set are brought down in batches to be upgraded. Once the upgraded batch is complete, the instances will begin taking traffic again and the next batch will begin. This continues until all instances are brought up-to-date. + public static UpgradeMode Rolling { get; } = new UpgradeMode(RollingValue); + /// Determines if two values are the same. + public static bool operator ==(UpgradeMode left, UpgradeMode right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(UpgradeMode left, UpgradeMode right) => !left.Equals(right); + /// Converts a string to a . + public static implicit operator UpgradeMode(string value) => new UpgradeMode(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is UpgradeMode other && Equals(other); + /// + public bool Equals(UpgradeMode other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value?.GetHashCode() ?? 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/UpgradePolicy.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/UpgradePolicy.Serialization.cs new file mode 100644 index 0000000000000..22e0ce0865d5d --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/UpgradePolicy.Serialization.cs @@ -0,0 +1,165 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class UpgradePolicy : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(UpgradePolicy)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("mode"u8); + writer.WriteStringValue(Mode.ToString()); + if (Optional.IsDefined(AutomaticOsUpgradePolicy)) + { + writer.WritePropertyName("automaticOSUpgradePolicy"u8); + writer.WriteObjectValue(AutomaticOsUpgradePolicy, options); + } + if (Optional.IsDefined(RollingUpgradePolicy)) + { + writer.WritePropertyName("rollingUpgradePolicy"u8); + writer.WriteObjectValue(RollingUpgradePolicy, options); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + UpgradePolicy IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(UpgradePolicy)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeUpgradePolicy(document.RootElement, options); + } + + internal static UpgradePolicy DeserializeUpgradePolicy(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + UpgradeMode mode = default; + AutomaticOsUpgradePolicy automaticOSUpgradePolicy = default; + RollingUpgradePolicy rollingUpgradePolicy = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("mode"u8)) + { + mode = new UpgradeMode(property.Value.GetString()); + continue; + } + if (property.NameEquals("automaticOSUpgradePolicy"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + automaticOSUpgradePolicy = AutomaticOsUpgradePolicy.DeserializeAutomaticOsUpgradePolicy(property.Value, options); + continue; + } + if (property.NameEquals("rollingUpgradePolicy"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + rollingUpgradePolicy = RollingUpgradePolicy.DeserializeRollingUpgradePolicy(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new UpgradePolicy(mode, automaticOSUpgradePolicy, rollingUpgradePolicy, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(UpgradePolicy)} does not support writing '{options.Format}' format."); + } + } + + UpgradePolicy IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeUpgradePolicy(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(UpgradePolicy)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static UpgradePolicy FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeUpgradePolicy(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/UpgradePolicy.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/UpgradePolicy.cs new file mode 100644 index 0000000000000..ff0839122bf21 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/UpgradePolicy.cs @@ -0,0 +1,80 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Describes an upgrade policy - automatic, manual, or rolling. + public partial class UpgradePolicy + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// Specifies the mode of an upgrade to virtual machines in the scale set.<br /><br /> Possible values are:<br /><br /> **Manual** - You control the application of updates to virtual machines in the scale set. You do this by using the manualUpgrade action.<br /><br /> **Automatic** - All virtual machines in the scale set are automatically updated at the same time.<br /><br /> **Rolling** - Scale set performs updates in batches with an optional pause time in between. + public UpgradePolicy(UpgradeMode mode) + { + Mode = mode; + } + + /// Initializes a new instance of . 
+ /// Specifies the mode of an upgrade to virtual machines in the scale set.<br /><br /> Possible values are:<br /><br /> **Manual** - You control the application of updates to virtual machines in the scale set. You do this by using the manualUpgrade action.<br /><br /> **Automatic** - All virtual machines in the scale set are automatically updated at the same time.<br /><br /> **Rolling** - Scale set performs updates in batches with an optional pause time in between. + /// The configuration parameters used for performing automatic OS upgrade. + /// The configuration parameters used while performing a rolling upgrade. This property is only supported on Pools with the virtualMachineConfiguration property. + /// Keeps track of any properties unknown to the library. + internal UpgradePolicy(UpgradeMode mode, AutomaticOsUpgradePolicy automaticOsUpgradePolicy, RollingUpgradePolicy rollingUpgradePolicy, IDictionary serializedAdditionalRawData) + { + Mode = mode; + AutomaticOsUpgradePolicy = automaticOsUpgradePolicy; + RollingUpgradePolicy = rollingUpgradePolicy; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal UpgradePolicy() + { + } + + /// Specifies the mode of an upgrade to virtual machines in the scale set.<br /><br /> Possible values are:<br /><br /> **Manual** - You control the application of updates to virtual machines in the scale set. You do this by using the manualUpgrade action.<br /><br /> **Automatic** - All virtual machines in the scale set are automatically updated at the same time.<br /><br /> **Rolling** - Scale set performs updates in batches with an optional pause time in between. + public UpgradeMode Mode { get; set; } + /// The configuration parameters used for performing automatic OS upgrade. + public AutomaticOsUpgradePolicy AutomaticOsUpgradePolicy { get; set; } + /// The configuration parameters used while performing a rolling upgrade. This property is only supported on Pools with the virtualMachineConfiguration property. + public RollingUpgradePolicy RollingUpgradePolicy { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/UploadBatchServiceLogsContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/UploadBatchServiceLogsContent.Serialization.cs new file mode 100644 index 0000000000000..5f9cc8af92622 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/UploadBatchServiceLogsContent.Serialization.cs @@ -0,0 +1,173 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class UploadBatchServiceLogsContent : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(UploadBatchServiceLogsContent)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("containerUrl"u8); + writer.WriteStringValue(ContainerUrl); + writer.WritePropertyName("startTime"u8); + writer.WriteStringValue(StartTime, "O"); + if (Optional.IsDefined(EndTime)) + { + writer.WritePropertyName("endTime"u8); + writer.WriteStringValue(EndTime.Value, "O"); + } + if (Optional.IsDefined(IdentityReference)) + { + writer.WritePropertyName("identityReference"u8); + writer.WriteObjectValue(IdentityReference, options); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + UploadBatchServiceLogsContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(UploadBatchServiceLogsContent)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeUploadBatchServiceLogsContent(document.RootElement, options); + } + + internal static UploadBatchServiceLogsContent DeserializeUploadBatchServiceLogsContent(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string containerUrl = default; + DateTimeOffset startTime = default; + DateTimeOffset? endTime = default; + BatchNodeIdentityReference identityReference = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("containerUrl"u8)) + { + containerUrl = property.Value.GetString(); + continue; + } + if (property.NameEquals("startTime"u8)) + { + startTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("endTime"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + endTime = property.Value.GetDateTimeOffset("O"); + continue; + } + if (property.NameEquals("identityReference"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + identityReference = BatchNodeIdentityReference.DeserializeBatchNodeIdentityReference(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new UploadBatchServiceLogsContent(containerUrl, startTime, endTime, identityReference, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(UploadBatchServiceLogsContent)} does not support writing '{options.Format}' format."); + } + } + + UploadBatchServiceLogsContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeUploadBatchServiceLogsContent(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(UploadBatchServiceLogsContent)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static UploadBatchServiceLogsContent FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeUploadBatchServiceLogsContent(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/UploadBatchServiceLogsContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/UploadBatchServiceLogsContent.cs new file mode 100644 index 0000000000000..e5283b9180cf5 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/UploadBatchServiceLogsContent.cs @@ -0,0 +1,89 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// The Azure Batch service log files upload parameters for a Compute Node. + public partial class UploadBatchServiceLogsContent + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The URL of the container within Azure Blob Storage to which to upload the Batch Service log file(s). If a user assigned managed identity is not being used, the URL must include a Shared Access Signature (SAS) granting write permissions to the container. The SAS duration must allow enough time for the upload to finish. The start time for SAS is optional and recommended to not be specified. + /// The start of the time range from which to upload Batch Service log file(s). Any log file containing a log message in the time range will be uploaded. 
This means that the operation might retrieve more logs than have been requested since the entire log file is always uploaded, but the operation should not retrieve fewer logs than have been requested. + /// is null. + public UploadBatchServiceLogsContent(string containerUrl, DateTimeOffset startTime) + { + Argument.AssertNotNull(containerUrl, nameof(containerUrl)); + + ContainerUrl = containerUrl; + StartTime = startTime; + } + + /// Initializes a new instance of . + /// The URL of the container within Azure Blob Storage to which to upload the Batch Service log file(s). If a user assigned managed identity is not being used, the URL must include a Shared Access Signature (SAS) granting write permissions to the container. The SAS duration must allow enough time for the upload to finish. The start time for SAS is optional and recommended to not be specified. + /// The start of the time range from which to upload Batch Service log file(s). Any log file containing a log message in the time range will be uploaded. This means that the operation might retrieve more logs than have been requested since the entire log file is always uploaded, but the operation should not retrieve fewer logs than have been requested. + /// The end of the time range from which to upload Batch Service log file(s). Any log file containing a log message in the time range will be uploaded. This means that the operation might retrieve more logs than have been requested since the entire log file is always uploaded, but the operation should not retrieve fewer logs than have been requested. If omitted, the default is to upload all logs available after the startTime. + /// The reference to the user assigned identity to use to access Azure Blob Storage specified by containerUrl. The identity must have write access to the Azure Blob Storage container. + /// Keeps track of any properties unknown to the library. + internal UploadBatchServiceLogsContent(string containerUrl, DateTimeOffset startTime, DateTimeOffset? endTime, BatchNodeIdentityReference identityReference, IDictionary serializedAdditionalRawData) + { + ContainerUrl = containerUrl; + StartTime = startTime; + EndTime = endTime; + IdentityReference = identityReference; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal UploadBatchServiceLogsContent() + { + } + + /// The URL of the container within Azure Blob Storage to which to upload the Batch Service log file(s). If a user assigned managed identity is not being used, the URL must include a Shared Access Signature (SAS) granting write permissions to the container. The SAS duration must allow enough time for the upload to finish. The start time for SAS is optional and recommended to not be specified. + public string ContainerUrl { get; } + /// The start of the time range from which to upload Batch Service log file(s). Any log file containing a log message in the time range will be uploaded. This means that the operation might retrieve more logs than have been requested since the entire log file is always uploaded, but the operation should not retrieve fewer logs than have been requested. + public DateTimeOffset StartTime { get; } + /// The end of the time range from which to upload Batch Service log file(s). Any log file containing a log message in the time range will be uploaded. 
This means that the operation might retrieve more logs than have been requested since the entire log file is always uploaded, but the operation should not retrieve fewer logs than have been requested. If omitted, the default is to upload all logs available after the startTime. + public DateTimeOffset? EndTime { get; set; } + /// The reference to the user assigned identity to use to access Azure Blob Storage specified by containerUrl. The identity must have write access to the Azure Blob Storage container. + public BatchNodeIdentityReference IdentityReference { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/UploadBatchServiceLogsResult.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/UploadBatchServiceLogsResult.Serialization.cs new file mode 100644 index 0000000000000..70bc53481d422 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/UploadBatchServiceLogsResult.Serialization.cs @@ -0,0 +1,143 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class UploadBatchServiceLogsResult : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(UploadBatchServiceLogsResult)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("virtualDirectoryName"u8); + writer.WriteStringValue(VirtualDirectoryName); + writer.WritePropertyName("numberOfFilesUploaded"u8); + writer.WriteNumberValue(NumberOfFilesUploaded); + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + UploadBatchServiceLogsResult IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(UploadBatchServiceLogsResult)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeUploadBatchServiceLogsResult(document.RootElement, options); + } + + internal static UploadBatchServiceLogsResult DeserializeUploadBatchServiceLogsResult(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string virtualDirectoryName = default; + int numberOfFilesUploaded = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("virtualDirectoryName"u8)) + { + virtualDirectoryName = property.Value.GetString(); + continue; + } + if (property.NameEquals("numberOfFilesUploaded"u8)) + { + numberOfFilesUploaded = property.Value.GetInt32(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new UploadBatchServiceLogsResult(virtualDirectoryName, numberOfFilesUploaded, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(UploadBatchServiceLogsResult)} does not support writing '{options.Format}' format."); + } + } + + UploadBatchServiceLogsResult IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeUploadBatchServiceLogsResult(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(UploadBatchServiceLogsResult)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static UploadBatchServiceLogsResult FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeUploadBatchServiceLogsResult(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/UploadBatchServiceLogsResult.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/UploadBatchServiceLogsResult.cs new file mode 100644 index 0000000000000..d7fc7a0306ece --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/UploadBatchServiceLogsResult.cs @@ -0,0 +1,81 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
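+// Usage sketch (editorial illustration, not generated code): pairing the request
+// model UploadBatchServiceLogsContent with this result model. The client method
+// name (UploadNodeLogs) and its parameters are assumptions for illustration; the
+// two model types and their members are taken from this package.
+//
+//   var content = new UploadBatchServiceLogsContent(
+//       containerUrl: "https://<account>.blob.core.windows.net/logs?<sas>",
+//       startTime: DateTimeOffset.UtcNow.AddHours(-6))
+//   {
+//       EndTime = DateTimeOffset.UtcNow // optional; omit to upload all logs after startTime
+//   };
+//   UploadBatchServiceLogsResult result = batchClient.UploadNodeLogs(poolId, nodeId, content);
+//   Console.WriteLine($"{result.NumberOfFilesUploaded} file(s) queued under '{result.VirtualDirectoryName}'");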
+ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// The result of uploading Batch service log files from a specific Compute Node. + public partial class UploadBatchServiceLogsResult + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The virtual directory within Azure Blob Storage container to which the Batch Service log file(s) will be uploaded. The virtual directory name is part of the blob name for each log file uploaded, and it is built based poolId, nodeId and a unique identifier. + /// The number of log files which will be uploaded. + /// is null. + internal UploadBatchServiceLogsResult(string virtualDirectoryName, int numberOfFilesUploaded) + { + Argument.AssertNotNull(virtualDirectoryName, nameof(virtualDirectoryName)); + + VirtualDirectoryName = virtualDirectoryName; + NumberOfFilesUploaded = numberOfFilesUploaded; + } + + /// Initializes a new instance of . + /// The virtual directory within Azure Blob Storage container to which the Batch Service log file(s) will be uploaded. The virtual directory name is part of the blob name for each log file uploaded, and it is built based poolId, nodeId and a unique identifier. + /// The number of log files which will be uploaded. + /// Keeps track of any properties unknown to the library. + internal UploadBatchServiceLogsResult(string virtualDirectoryName, int numberOfFilesUploaded, IDictionary serializedAdditionalRawData) + { + VirtualDirectoryName = virtualDirectoryName; + NumberOfFilesUploaded = numberOfFilesUploaded; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal UploadBatchServiceLogsResult() + { + } + + /// The virtual directory within Azure Blob Storage container to which the Batch Service log file(s) will be uploaded. The virtual directory name is part of the blob name for each log file uploaded, and it is built based poolId, nodeId and a unique identifier. + public string VirtualDirectoryName { get; } + /// The number of log files which will be uploaded. + public int NumberOfFilesUploaded { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/UserAccount.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/UserAccount.Serialization.cs new file mode 100644 index 0000000000000..47a39b431fc5f --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/UserAccount.Serialization.cs @@ -0,0 +1,194 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
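+// Round-trip sketch (editorial illustration): like the other models in this
+// namespace, UserAccount implements IJsonModel, so it can be written out and
+// re-read with System.ClientModel.Primitives.ModelReaderWriter. The values below
+// are placeholders.
+//
+//   var account = new UserAccount("taskuser", "<password>");
+//   BinaryData json = ModelReaderWriter.Write(account);   // {"name":"taskuser","password":"<password>"}
+//   UserAccount roundTripped = ModelReaderWriter.Read<UserAccount>(json);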
+ +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class UserAccount : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(UserAccount)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("name"u8); + writer.WriteStringValue(Name); + writer.WritePropertyName("password"u8); + writer.WriteStringValue(Password); + if (Optional.IsDefined(ElevationLevel)) + { + writer.WritePropertyName("elevationLevel"u8); + writer.WriteStringValue(ElevationLevel.Value.ToString()); + } + if (Optional.IsDefined(LinuxUserConfiguration)) + { + writer.WritePropertyName("linuxUserConfiguration"u8); + writer.WriteObjectValue(LinuxUserConfiguration, options); + } + if (Optional.IsDefined(WindowsUserConfiguration)) + { + writer.WritePropertyName("windowsUserConfiguration"u8); + writer.WriteObjectValue(WindowsUserConfiguration, options); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + UserAccount IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(UserAccount)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeUserAccount(document.RootElement, options); + } + + internal static UserAccount DeserializeUserAccount(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string name = default; + string password = default; + ElevationLevel? 
elevationLevel = default; + LinuxUserConfiguration linuxUserConfiguration = default; + WindowsUserConfiguration windowsUserConfiguration = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("name"u8)) + { + name = property.Value.GetString(); + continue; + } + if (property.NameEquals("password"u8)) + { + password = property.Value.GetString(); + continue; + } + if (property.NameEquals("elevationLevel"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + elevationLevel = new ElevationLevel(property.Value.GetString()); + continue; + } + if (property.NameEquals("linuxUserConfiguration"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + linuxUserConfiguration = LinuxUserConfiguration.DeserializeLinuxUserConfiguration(property.Value, options); + continue; + } + if (property.NameEquals("windowsUserConfiguration"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + windowsUserConfiguration = WindowsUserConfiguration.DeserializeWindowsUserConfiguration(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new UserAccount( + name, + password, + elevationLevel, + linuxUserConfiguration, + windowsUserConfiguration, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(UserAccount)} does not support writing '{options.Format}' format."); + } + } + + UserAccount IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeUserAccount(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(UserAccount)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static UserAccount FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeUserAccount(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/UserAccount.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/UserAccount.cs new file mode 100644 index 0000000000000..bd7b9f96245ea --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/UserAccount.cs @@ -0,0 +1,97 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
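+// Usage sketch (editorial illustration): creating an elevated user account for a
+// pool. ElevationLevel.Admin is assumed to be a defined value of the extensible
+// ElevationLevel enum; the pool-creation call that would consume this account is
+// not shown.
+//
+//   var admin = new UserAccount(name: "admin", password: "<password>")
+//   {
+//       ElevationLevel = ElevationLevel.Admin
+//   };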
+ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// + /// Properties used to create a user used to execute Tasks on an Azure Batch + /// Compute Node. + /// + public partial class UserAccount + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The name of the user Account. Names can contain any Unicode characters up to a maximum length of 20. + /// The password for the user Account. + /// or is null. + public UserAccount(string name, string password) + { + Argument.AssertNotNull(name, nameof(name)); + Argument.AssertNotNull(password, nameof(password)); + + Name = name; + Password = password; + } + + /// Initializes a new instance of . + /// The name of the user Account. Names can contain any Unicode characters up to a maximum length of 20. + /// The password for the user Account. + /// The elevation level of the user Account. The default value is nonAdmin. + /// The Linux-specific user configuration for the user Account. This property is ignored if specified on a Windows Pool. If not specified, the user is created with the default options. + /// The Windows-specific user configuration for the user Account. This property can only be specified if the user is on a Windows Pool. If not specified and on a Windows Pool, the user is created with the default options. + /// Keeps track of any properties unknown to the library. + internal UserAccount(string name, string password, ElevationLevel? elevationLevel, LinuxUserConfiguration linuxUserConfiguration, WindowsUserConfiguration windowsUserConfiguration, IDictionary serializedAdditionalRawData) + { + Name = name; + Password = password; + ElevationLevel = elevationLevel; + LinuxUserConfiguration = linuxUserConfiguration; + WindowsUserConfiguration = windowsUserConfiguration; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal UserAccount() + { + } + + /// The name of the user Account. Names can contain any Unicode characters up to a maximum length of 20. + public string Name { get; set; } + /// The password for the user Account. + public string Password { get; set; } + /// The elevation level of the user Account. The default value is nonAdmin. + public ElevationLevel? ElevationLevel { get; set; } + /// The Linux-specific user configuration for the user Account. This property is ignored if specified on a Windows Pool. If not specified, the user is created with the default options. + public LinuxUserConfiguration LinuxUserConfiguration { get; set; } + /// The Windows-specific user configuration for the user Account. This property can only be specified if the user is on a Windows Pool. If not specified and on a Windows Pool, the user is created with the default options. 
+ public WindowsUserConfiguration WindowsUserConfiguration { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/UserAssignedIdentity.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/UserAssignedIdentity.Serialization.cs new file mode 100644 index 0000000000000..33b89d5ad5ec1 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/UserAssignedIdentity.Serialization.cs @@ -0,0 +1,157 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class UserAssignedIdentity : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(UserAssignedIdentity)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("resourceId"u8); + writer.WriteStringValue(ResourceId); + if (options.Format != "W" && Optional.IsDefined(ClientId)) + { + writer.WritePropertyName("clientId"u8); + writer.WriteStringValue(ClientId); + } + if (options.Format != "W" && Optional.IsDefined(PrincipalId)) + { + writer.WritePropertyName("principalId"u8); + writer.WriteStringValue(PrincipalId); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + UserAssignedIdentity IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(UserAssignedIdentity)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeUserAssignedIdentity(document.RootElement, options); + } + + internal static UserAssignedIdentity DeserializeUserAssignedIdentity(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string resourceId = default; + string clientId = default; + string principalId = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("resourceId"u8)) + { + resourceId = property.Value.GetString(); + continue; + } + if (property.NameEquals("clientId"u8)) + { + clientId = property.Value.GetString(); + continue; + } + if (property.NameEquals("principalId"u8)) + { + principalId = property.Value.GetString(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new UserAssignedIdentity(resourceId, clientId, principalId, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(UserAssignedIdentity)} does not support writing '{options.Format}' format."); + } + } + + UserAssignedIdentity IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeUserAssignedIdentity(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(UserAssignedIdentity)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static UserAssignedIdentity FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeUserAssignedIdentity(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/UserAssignedIdentity.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/UserAssignedIdentity.cs new file mode 100644 index 0000000000000..efdf060fdc91b --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/UserAssignedIdentity.cs @@ -0,0 +1,83 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
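+// Usage sketch (editorial illustration): UserAssignedIdentity is output-only (its
+// constructors are internal), so instances arrive on service responses rather than
+// being built by callers. The pool.Identity.UserAssignedIdentities access path is
+// an assumption for illustration.
+//
+//   foreach (UserAssignedIdentity identity in pool.Identity.UserAssignedIdentities)
+//   {
+//       Console.WriteLine($"{identity.ResourceId}: client={identity.ClientId} principal={identity.PrincipalId}");
+//   }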
+ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// The user assigned Identity. + public partial class UserAssignedIdentity + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The ARM resource id of the user assigned identity. + /// is null. + internal UserAssignedIdentity(string resourceId) + { + Argument.AssertNotNull(resourceId, nameof(resourceId)); + + ResourceId = resourceId; + } + + /// Initializes a new instance of . + /// The ARM resource id of the user assigned identity. + /// The client id of the user assigned identity. + /// The principal id of the user assigned identity. + /// Keeps track of any properties unknown to the library. + internal UserAssignedIdentity(string resourceId, string clientId, string principalId, IDictionary serializedAdditionalRawData) + { + ResourceId = resourceId; + ClientId = clientId; + PrincipalId = principalId; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal UserAssignedIdentity() + { + } + + /// The ARM resource id of the user assigned identity. + public string ResourceId { get; } + /// The client id of the user assigned identity. + public string ClientId { get; } + /// The principal id of the user assigned identity. + public string PrincipalId { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/UserIdentity.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/UserIdentity.Serialization.cs new file mode 100644 index 0000000000000..abb58abef495e --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/UserIdentity.Serialization.cs @@ -0,0 +1,153 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class UserIdentity : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(UserIdentity)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(Username)) + { + writer.WritePropertyName("username"u8); + writer.WriteStringValue(Username); + } + if (Optional.IsDefined(AutoUser)) + { + writer.WritePropertyName("autoUser"u8); + writer.WriteObjectValue(AutoUser, options); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + UserIdentity IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(UserIdentity)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeUserIdentity(document.RootElement, options); + } + + internal static UserIdentity DeserializeUserIdentity(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string username = default; + AutoUserSpecification autoUser = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("username"u8)) + { + username = property.Value.GetString(); + continue; + } + if (property.NameEquals("autoUser"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + autoUser = AutoUserSpecification.DeserializeAutoUserSpecification(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new UserIdentity(username, autoUser, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(UserIdentity)} does not support writing '{options.Format}' format."); + } + } + + UserIdentity IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeUserIdentity(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(UserIdentity)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. 
+ /// The response to deserialize the model from. + internal static UserIdentity FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeUserIdentity(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/UserIdentity.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/UserIdentity.cs new file mode 100644 index 0000000000000..033d6bcf10ec3 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/UserIdentity.cs @@ -0,0 +1,69 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// The definition of the user identity under which the Task is run. Specify either the userName or autoUser property, but not both. + public partial class UserIdentity + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public UserIdentity() + { + } + + /// Initializes a new instance of . + /// The name of the user identity under which the Task is run. The userName and autoUser properties are mutually exclusive; you must specify one but not both. + /// The auto user under which the Task is run. The userName and autoUser properties are mutually exclusive; you must specify one but not both. + /// Keeps track of any properties unknown to the library. + internal UserIdentity(string username, AutoUserSpecification autoUser, IDictionary serializedAdditionalRawData) + { + Username = username; + AutoUser = autoUser; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The name of the user identity under which the Task is run. The userName and autoUser properties are mutually exclusive; you must specify one but not both. + public string Username { get; set; } + /// The auto user under which the Task is run. The userName and autoUser properties are mutually exclusive; you must specify one but not both. + public AutoUserSpecification AutoUser { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/VMExtension.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/VMExtension.Serialization.cs new file mode 100644 index 0000000000000..80a26f3037780 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/VMExtension.Serialization.cs @@ -0,0 +1,279 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
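+// Wire-format sketch (editorial illustration): for a VMExtension carrying only its
+// three required properties, the writer below emits JSON shaped like the
+// following; optional properties are skipped when undefined:
+//
+//   { "name": "customExtension", "publisher": "Microsoft.Azure.Extensions", "type": "CustomScript" }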
+ +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class VMExtension : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(VMExtension)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("name"u8); + writer.WriteStringValue(Name); + writer.WritePropertyName("publisher"u8); + writer.WriteStringValue(Publisher); + writer.WritePropertyName("type"u8); + writer.WriteStringValue(Type); + if (Optional.IsDefined(TypeHandlerVersion)) + { + writer.WritePropertyName("typeHandlerVersion"u8); + writer.WriteStringValue(TypeHandlerVersion); + } + if (Optional.IsDefined(AutoUpgradeMinorVersion)) + { + writer.WritePropertyName("autoUpgradeMinorVersion"u8); + writer.WriteBooleanValue(AutoUpgradeMinorVersion.Value); + } + if (Optional.IsDefined(EnableAutomaticUpgrade)) + { + writer.WritePropertyName("enableAutomaticUpgrade"u8); + writer.WriteBooleanValue(EnableAutomaticUpgrade.Value); + } + if (Optional.IsCollectionDefined(Settings)) + { + writer.WritePropertyName("settings"u8); + writer.WriteStartObject(); + foreach (var item in Settings) + { + writer.WritePropertyName(item.Key); + writer.WriteStringValue(item.Value); + } + writer.WriteEndObject(); + } + if (Optional.IsCollectionDefined(ProtectedSettings)) + { + writer.WritePropertyName("protectedSettings"u8); + writer.WriteStartObject(); + foreach (var item in ProtectedSettings) + { + writer.WritePropertyName(item.Key); + writer.WriteStringValue(item.Value); + } + writer.WriteEndObject(); + } + if (Optional.IsCollectionDefined(ProvisionAfterExtensions)) + { + writer.WritePropertyName("provisionAfterExtensions"u8); + writer.WriteStartArray(); + foreach (var item in ProvisionAfterExtensions) + { + writer.WriteStringValue(item); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + VMExtension IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(VMExtension)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeVMExtension(document.RootElement, options); + } + + internal static VMExtension DeserializeVMExtension(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string name = default; + string publisher = default; + string type = default; + string typeHandlerVersion = default; + bool? autoUpgradeMinorVersion = default; + bool? enableAutomaticUpgrade = default; + IDictionary settings = default; + IDictionary protectedSettings = default; + IList provisionAfterExtensions = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("name"u8)) + { + name = property.Value.GetString(); + continue; + } + if (property.NameEquals("publisher"u8)) + { + publisher = property.Value.GetString(); + continue; + } + if (property.NameEquals("type"u8)) + { + type = property.Value.GetString(); + continue; + } + if (property.NameEquals("typeHandlerVersion"u8)) + { + typeHandlerVersion = property.Value.GetString(); + continue; + } + if (property.NameEquals("autoUpgradeMinorVersion"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + autoUpgradeMinorVersion = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("enableAutomaticUpgrade"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + enableAutomaticUpgrade = property.Value.GetBoolean(); + continue; + } + if (property.NameEquals("settings"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetString()); + } + settings = dictionary; + continue; + } + if (property.NameEquals("protectedSettings"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + Dictionary dictionary = new Dictionary(); + foreach (var property0 in property.Value.EnumerateObject()) + { + dictionary.Add(property0.Name, property0.Value.GetString()); + } + protectedSettings = dictionary; + continue; + } + if (property.NameEquals("provisionAfterExtensions"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(item.GetString()); + } + provisionAfterExtensions = array; + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new VMExtension( + name, + publisher, + type, + typeHandlerVersion, + autoUpgradeMinorVersion, + enableAutomaticUpgrade, + settings ?? new ChangeTrackingDictionary(), + protectedSettings ?? new ChangeTrackingDictionary(), + provisionAfterExtensions ?? new ChangeTrackingList(), + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(VMExtension)} does not support writing '{options.Format}' format."); + } + } + + VMExtension IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeVMExtension(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(VMExtension)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static VMExtension FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeVMExtension(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/VMExtension.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/VMExtension.cs new file mode 100644 index 0000000000000..a041901b66bbc --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/VMExtension.cs @@ -0,0 +1,116 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// The configuration for virtual machine extensions. + public partial class VMExtension + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// The name of the virtual machine extension. + /// The name of the extension handler publisher. + /// The type of the extension. + /// , or is null. + public VMExtension(string name, string publisher, string type) + { + Argument.AssertNotNull(name, nameof(name)); + Argument.AssertNotNull(publisher, nameof(publisher)); + Argument.AssertNotNull(type, nameof(type)); + + Name = name; + Publisher = publisher; + Type = type; + Settings = new ChangeTrackingDictionary(); + ProtectedSettings = new ChangeTrackingDictionary(); + ProvisionAfterExtensions = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// The name of the virtual machine extension. + /// The name of the extension handler publisher. + /// The type of the extension. 
+ /// The version of script handler. + /// Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true. + /// Indicates whether the extension should be automatically upgraded by the platform if there is a newer version of the extension available. + /// JSON formatted public settings for the extension. + /// The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all. + /// The collection of extension names. Collection of extension names after which this extension needs to be provisioned. + /// Keeps track of any properties unknown to the library. + internal VMExtension(string name, string publisher, string type, string typeHandlerVersion, bool? autoUpgradeMinorVersion, bool? enableAutomaticUpgrade, IDictionary settings, IDictionary protectedSettings, IList provisionAfterExtensions, IDictionary serializedAdditionalRawData) + { + Name = name; + Publisher = publisher; + Type = type; + TypeHandlerVersion = typeHandlerVersion; + AutoUpgradeMinorVersion = autoUpgradeMinorVersion; + EnableAutomaticUpgrade = enableAutomaticUpgrade; + Settings = settings; + ProtectedSettings = protectedSettings; + ProvisionAfterExtensions = provisionAfterExtensions; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal VMExtension() + { + } + + /// The name of the virtual machine extension. + public string Name { get; set; } + /// The name of the extension handler publisher. + public string Publisher { get; set; } + /// The type of the extension. + public string Type { get; set; } + /// The version of script handler. + public string TypeHandlerVersion { get; set; } + /// Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true. + public bool? AutoUpgradeMinorVersion { get; set; } + /// Indicates whether the extension should be automatically upgraded by the platform if there is a newer version of the extension available. + public bool? EnableAutomaticUpgrade { get; set; } + /// JSON formatted public settings for the extension. + public IDictionary Settings { get; } + /// The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all. + public IDictionary ProtectedSettings { get; } + /// The collection of extension names. Collection of extension names after which this extension needs to be provisioned. + public IList ProvisionAfterExtensions { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/VMExtensionInstanceView.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/VMExtensionInstanceView.Serialization.cs new file mode 100644 index 0000000000000..42e2efd6b4cf2 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/VMExtensionInstanceView.Serialization.cs @@ -0,0 +1,188 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
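+// Usage sketch (editorial illustration): VMExtensionInstanceView is read-only and
+// is deserialized from responses; Statuses and SubStatuses default to empty lists
+// when absent from the payload. The nodeExtension.InstanceView access path and the
+// InstanceViewStatus.Code property name are assumptions for illustration.
+//
+//   VMExtensionInstanceView view = nodeExtension.InstanceView;
+//   foreach (InstanceViewStatus status in view.Statuses)
+//   {
+//       Console.WriteLine(status.Code);
+//   }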
+ +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class VMExtensionInstanceView : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(VMExtensionInstanceView)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(Name)) + { + writer.WritePropertyName("name"u8); + writer.WriteStringValue(Name); + } + if (Optional.IsCollectionDefined(Statuses)) + { + writer.WritePropertyName("statuses"u8); + writer.WriteStartArray(); + foreach (var item in Statuses) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (Optional.IsCollectionDefined(SubStatuses)) + { + writer.WritePropertyName("subStatuses"u8); + writer.WriteStartArray(); + foreach (var item in SubStatuses) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + VMExtensionInstanceView IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(VMExtensionInstanceView)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeVMExtensionInstanceView(document.RootElement, options); + } + + internal static VMExtensionInstanceView DeserializeVMExtensionInstanceView(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + string name = default; + IReadOnlyList statuses = default; + IReadOnlyList subStatuses = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("name"u8)) + { + name = property.Value.GetString(); + continue; + } + if (property.NameEquals("statuses"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(InstanceViewStatus.DeserializeInstanceViewStatus(item, options)); + } + statuses = array; + continue; + } + if (property.NameEquals("subStatuses"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(InstanceViewStatus.DeserializeInstanceViewStatus(item, options)); + } + subStatuses = array; + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new VMExtensionInstanceView(name, statuses ?? new ChangeTrackingList(), subStatuses ?? new ChangeTrackingList(), serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(VMExtensionInstanceView)} does not support writing '{options.Format}' format."); + } + } + + VMExtensionInstanceView IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeVMExtensionInstanceView(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(VMExtensionInstanceView)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static VMExtensionInstanceView FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeVMExtensionInstanceView(document.RootElement); + } + + /// Convert into a . 
+ internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/VMExtensionInstanceView.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/VMExtensionInstanceView.cs new file mode 100644 index 0000000000000..00ed9fc34b003 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/VMExtensionInstanceView.cs @@ -0,0 +1,75 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// The vm extension instance view. + public partial class VMExtensionInstanceView + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + internal VMExtensionInstanceView() + { + Statuses = new ChangeTrackingList(); + SubStatuses = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// The name of the vm extension instance view. + /// The resource status information. + /// The resource status information. + /// Keeps track of any properties unknown to the library. + internal VMExtensionInstanceView(string name, IReadOnlyList statuses, IReadOnlyList subStatuses, IDictionary serializedAdditionalRawData) + { + Name = name; + Statuses = statuses; + SubStatuses = subStatuses; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The name of the vm extension instance view. + public string Name { get; } + /// The resource status information. + public IReadOnlyList Statuses { get; } + /// The resource status information. + public IReadOnlyList SubStatuses { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/VirtualMachineConfiguration.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/VirtualMachineConfiguration.Serialization.cs new file mode 100644 index 0000000000000..5a2268329b43a --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/VirtualMachineConfiguration.Serialization.cs @@ -0,0 +1,322 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class VirtualMachineConfiguration : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(VirtualMachineConfiguration)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + writer.WritePropertyName("imageReference"u8); + writer.WriteObjectValue(ImageReference, options); + writer.WritePropertyName("nodeAgentSKUId"u8); + writer.WriteStringValue(NodeAgentSkuId); + if (Optional.IsDefined(WindowsConfiguration)) + { + writer.WritePropertyName("windowsConfiguration"u8); + writer.WriteObjectValue(WindowsConfiguration, options); + } + if (Optional.IsCollectionDefined(DataDisks)) + { + writer.WritePropertyName("dataDisks"u8); + writer.WriteStartArray(); + foreach (var item in DataDisks) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (Optional.IsDefined(LicenseType)) + { + writer.WritePropertyName("licenseType"u8); + writer.WriteStringValue(LicenseType); + } + if (Optional.IsDefined(ContainerConfiguration)) + { + writer.WritePropertyName("containerConfiguration"u8); + writer.WriteObjectValue(ContainerConfiguration, options); + } + if (Optional.IsDefined(DiskEncryptionConfiguration)) + { + writer.WritePropertyName("diskEncryptionConfiguration"u8); + writer.WriteObjectValue(DiskEncryptionConfiguration, options); + } + if (Optional.IsDefined(NodePlacementConfiguration)) + { + writer.WritePropertyName("nodePlacementConfiguration"u8); + writer.WriteObjectValue(NodePlacementConfiguration, options); + } + if (Optional.IsCollectionDefined(Extensions)) + { + writer.WritePropertyName("extensions"u8); + writer.WriteStartArray(); + foreach (var item in Extensions) + { + writer.WriteObjectValue(item, options); + } + writer.WriteEndArray(); + } + if (Optional.IsDefined(OsDisk)) + { + writer.WritePropertyName("osDisk"u8); + writer.WriteObjectValue(OsDisk, options); + } + if (Optional.IsDefined(SecurityProfile)) + { + writer.WritePropertyName("securityProfile"u8); + writer.WriteObjectValue(SecurityProfile, options); + } + if (Optional.IsDefined(ServiceArtifactReference)) + { + writer.WritePropertyName("serviceArtifactReference"u8); + writer.WriteObjectValue(ServiceArtifactReference, options); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + VirtualMachineConfiguration IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(VirtualMachineConfiguration)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeVirtualMachineConfiguration(document.RootElement, options); + } + + internal static VirtualMachineConfiguration DeserializeVirtualMachineConfiguration(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + ImageReference imageReference = default; + string nodeAgentSKUId = default; + WindowsConfiguration windowsConfiguration = default; + IList dataDisks = default; + string licenseType = default; + ContainerConfiguration containerConfiguration = default; + DiskEncryptionConfiguration diskEncryptionConfiguration = default; + BatchNodePlacementConfiguration nodePlacementConfiguration = default; + IList extensions = default; + OSDisk osDisk = default; + SecurityProfile securityProfile = default; + ServiceArtifactReference serviceArtifactReference = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("imageReference"u8)) + { + imageReference = ImageReference.DeserializeImageReference(property.Value, options); + continue; + } + if (property.NameEquals("nodeAgentSKUId"u8)) + { + nodeAgentSKUId = property.Value.GetString(); + continue; + } + if (property.NameEquals("windowsConfiguration"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + windowsConfiguration = WindowsConfiguration.DeserializeWindowsConfiguration(property.Value, options); + continue; + } + if (property.NameEquals("dataDisks"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(DataDisk.DeserializeDataDisk(item, options)); + } + dataDisks = array; + continue; + } + if (property.NameEquals("licenseType"u8)) + { + licenseType = property.Value.GetString(); + continue; + } + if (property.NameEquals("containerConfiguration"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + containerConfiguration = ContainerConfiguration.DeserializeContainerConfiguration(property.Value, options); + continue; + } + if (property.NameEquals("diskEncryptionConfiguration"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + diskEncryptionConfiguration = DiskEncryptionConfiguration.DeserializeDiskEncryptionConfiguration(property.Value, options); + continue; + } + if (property.NameEquals("nodePlacementConfiguration"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + nodePlacementConfiguration = BatchNodePlacementConfiguration.DeserializeBatchNodePlacementConfiguration(property.Value, options); + continue; + } + if (property.NameEquals("extensions"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + List array = new List(); + foreach (var item in property.Value.EnumerateArray()) + { + array.Add(VMExtension.DeserializeVMExtension(item, options)); + } + extensions = array; + continue; + } + if (property.NameEquals("osDisk"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { 
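+                        // An explicit JSON null for "osDisk" is treated like an absent property:
+                        // skip it and leave osDisk unset rather than failing deserialization.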
+ continue; + } + osDisk = OSDisk.DeserializeOSDisk(property.Value, options); + continue; + } + if (property.NameEquals("securityProfile"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + securityProfile = SecurityProfile.DeserializeSecurityProfile(property.Value, options); + continue; + } + if (property.NameEquals("serviceArtifactReference"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + serviceArtifactReference = ServiceArtifactReference.DeserializeServiceArtifactReference(property.Value, options); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new VirtualMachineConfiguration( + imageReference, + nodeAgentSKUId, + windowsConfiguration, + dataDisks ?? new ChangeTrackingList(), + licenseType, + containerConfiguration, + diskEncryptionConfiguration, + nodePlacementConfiguration, + extensions ?? new ChangeTrackingList(), + osDisk, + securityProfile, + serviceArtifactReference, + serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(VirtualMachineConfiguration)} does not support writing '{options.Format}' format."); + } + } + + VirtualMachineConfiguration IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeVirtualMachineConfiguration(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(VirtualMachineConfiguration)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. + internal static VirtualMachineConfiguration FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeVirtualMachineConfiguration(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/VirtualMachineConfiguration.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/VirtualMachineConfiguration.cs new file mode 100644 index 0000000000000..8923d6045e329 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/VirtualMachineConfiguration.cs @@ -0,0 +1,147 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// + /// The configuration for Compute Nodes in a Pool based on the Azure Virtual + /// Machines infrastructure. 
+ /// + public partial class VirtualMachineConfiguration + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + /// A reference to the Azure Virtual Machines Marketplace Image or the custom Virtual Machine Image to use. + /// The SKU of the Batch Compute Node agent to be provisioned on Compute Nodes in the Pool. The Batch Compute Node agent is a program that runs on each Compute Node in the Pool, and provides the command-and-control interface between the Compute Node and the Batch service. There are different implementations of the Compute Node agent, known as SKUs, for different operating systems. You must specify a Compute Node agent SKU which matches the selected Image reference. To get the list of supported Compute Node agent SKUs along with their list of verified Image references, see the 'List supported Compute Node agent SKUs' operation. + /// or is null. + public VirtualMachineConfiguration(ImageReference imageReference, string nodeAgentSkuId) + { + Argument.AssertNotNull(imageReference, nameof(imageReference)); + Argument.AssertNotNull(nodeAgentSkuId, nameof(nodeAgentSkuId)); + + ImageReference = imageReference; + NodeAgentSkuId = nodeAgentSkuId; + DataDisks = new ChangeTrackingList(); + Extensions = new ChangeTrackingList(); + } + + /// Initializes a new instance of . + /// A reference to the Azure Virtual Machines Marketplace Image or the custom Virtual Machine Image to use. + /// The SKU of the Batch Compute Node agent to be provisioned on Compute Nodes in the Pool. The Batch Compute Node agent is a program that runs on each Compute Node in the Pool, and provides the command-and-control interface between the Compute Node and the Batch service. There are different implementations of the Compute Node agent, known as SKUs, for different operating systems. You must specify a Compute Node agent SKU which matches the selected Image reference. To get the list of supported Compute Node agent SKUs along with their list of verified Image references, see the 'List supported Compute Node agent SKUs' operation. + /// Windows operating system settings on the virtual machine. This property must not be specified if the imageReference property specifies a Linux OS Image. + /// The configuration for data disks attached to the Compute Nodes in the Pool. This property must be specified if the Compute Nodes in the Pool need to have empty data disks attached to them. This cannot be updated. Each Compute Node gets its own disk (the disk is not a file share). Existing disks cannot be attached, each attached disk is empty. When the Compute Node is removed from the Pool, the disk and all data associated with it is also deleted. 
The disk is not formatted after being attached, it must be formatted before use - for more information see https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux and https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. + /// + /// This only applies to Images that contain the Windows operating system, and + /// should only be used when you hold valid on-premises licenses for the Compute + /// Nodes which will be deployed. If omitted, no on-premises licensing discount is + /// applied. Values are: + /// + /// Windows_Server - The on-premises license is for Windows + /// Server. + /// Windows_Client - The on-premises license is for Windows Client. + /// + /// + /// The container configuration for the Pool. If specified, setup is performed on each Compute Node in the Pool to allow Tasks to run in containers. All regular Tasks and Job manager Tasks run on this Pool must specify the containerSettings property, and all other Tasks may specify it. + /// The disk encryption configuration for the pool. If specified, encryption is performed on each node in the pool during node provisioning. + /// The node placement configuration for the pool. This configuration will specify rules on how nodes in the pool will be physically allocated. + /// The virtual machine extension for the pool. If specified, the extensions mentioned in this configuration will be installed on each node. + /// Settings for the operating system disk of the Virtual Machine. + /// Specifies the security profile settings for the virtual machine or virtual machine scale set. + /// Specifies the service artifact reference id used to set same image version for all virtual machines in the scale set when using 'latest' image version. The service artifact reference id in the form of /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. + /// Keeps track of any properties unknown to the library. + internal VirtualMachineConfiguration(ImageReference imageReference, string nodeAgentSkuId, WindowsConfiguration windowsConfiguration, IList dataDisks, string licenseType, ContainerConfiguration containerConfiguration, DiskEncryptionConfiguration diskEncryptionConfiguration, BatchNodePlacementConfiguration nodePlacementConfiguration, IList extensions, OSDisk osDisk, SecurityProfile securityProfile, ServiceArtifactReference serviceArtifactReference, IDictionary serializedAdditionalRawData) + { + ImageReference = imageReference; + NodeAgentSkuId = nodeAgentSkuId; + WindowsConfiguration = windowsConfiguration; + DataDisks = dataDisks; + LicenseType = licenseType; + ContainerConfiguration = containerConfiguration; + DiskEncryptionConfiguration = diskEncryptionConfiguration; + NodePlacementConfiguration = nodePlacementConfiguration; + Extensions = extensions; + OsDisk = osDisk; + SecurityProfile = securityProfile; + ServiceArtifactReference = serviceArtifactReference; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Initializes a new instance of for deserialization. + internal VirtualMachineConfiguration() + { + } + + /// A reference to the Azure Virtual Machines Marketplace Image or the custom Virtual Machine Image to use. 
+ public ImageReference ImageReference { get; set; } + /// The SKU of the Batch Compute Node agent to be provisioned on Compute Nodes in the Pool. The Batch Compute Node agent is a program that runs on each Compute Node in the Pool, and provides the command-and-control interface between the Compute Node and the Batch service. There are different implementations of the Compute Node agent, known as SKUs, for different operating systems. You must specify a Compute Node agent SKU which matches the selected Image reference. To get the list of supported Compute Node agent SKUs along with their list of verified Image references, see the 'List supported Compute Node agent SKUs' operation. + public string NodeAgentSkuId { get; set; } + /// Windows operating system settings on the virtual machine. This property must not be specified if the imageReference property specifies a Linux OS Image. + public WindowsConfiguration WindowsConfiguration { get; set; } + /// The configuration for data disks attached to the Compute Nodes in the Pool. This property must be specified if the Compute Nodes in the Pool need to have empty data disks attached to them. This cannot be updated. Each Compute Node gets its own disk (the disk is not a file share). Existing disks cannot be attached, each attached disk is empty. When the Compute Node is removed from the Pool, the disk and all data associated with it is also deleted. The disk is not formatted after being attached, it must be formatted before use - for more information see https://docs.microsoft.com/en-us/azure/virtual-machines/linux/classic/attach-disk#initialize-a-new-data-disk-in-linux and https://docs.microsoft.com/en-us/azure/virtual-machines/windows/attach-disk-ps#add-an-empty-data-disk-to-a-virtual-machine. + public IList DataDisks { get; } + /// + /// This only applies to Images that contain the Windows operating system, and + /// should only be used when you hold valid on-premises licenses for the Compute + /// Nodes which will be deployed. If omitted, no on-premises licensing discount is + /// applied. Values are: + /// + /// Windows_Server - The on-premises license is for Windows + /// Server. + /// Windows_Client - The on-premises license is for Windows Client. + /// + /// + public string LicenseType { get; set; } + /// The container configuration for the Pool. If specified, setup is performed on each Compute Node in the Pool to allow Tasks to run in containers. All regular Tasks and Job manager Tasks run on this Pool must specify the containerSettings property, and all other Tasks may specify it. + public ContainerConfiguration ContainerConfiguration { get; set; } + /// The disk encryption configuration for the pool. If specified, encryption is performed on each node in the pool during node provisioning. + public DiskEncryptionConfiguration DiskEncryptionConfiguration { get; set; } + /// The node placement configuration for the pool. This configuration will specify rules on how nodes in the pool will be physically allocated. + public BatchNodePlacementConfiguration NodePlacementConfiguration { get; set; } + /// The virtual machine extension for the pool. If specified, the extensions mentioned in this configuration will be installed on each node. + public IList Extensions { get; } + /// Settings for the operating system disk of the Virtual Machine. + public OSDisk OsDisk { get; set; } + /// Specifies the security profile settings for the virtual machine or virtual machine scale set. 
+ public SecurityProfile SecurityProfile { get; set; } + /// Specifies the service artifact reference id used to set same image version for all virtual machines in the scale set when using 'latest' image version. The service artifact reference id in the form of /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. + public ServiceArtifactReference ServiceArtifactReference { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/VirtualMachineInfo.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/VirtualMachineInfo.Serialization.cs new file mode 100644 index 0000000000000..90cd6837075d1 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/VirtualMachineInfo.Serialization.cs @@ -0,0 +1,153 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class VirtualMachineInfo : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(VirtualMachineInfo)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(ImageReference)) + { + writer.WritePropertyName("imageReference"u8); + writer.WriteObjectValue(ImageReference, options); + } + if (Optional.IsDefined(ScaleSetVmResourceId)) + { + writer.WritePropertyName("scaleSetVmResourceId"u8); + writer.WriteStringValue(ScaleSetVmResourceId); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + VirtualMachineInfo IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel<VirtualMachineInfo>)this).GetFormatFromOptions(options) : options.Format;
+            if (format != "J")
+            {
+                throw new FormatException($"The model {nameof(VirtualMachineInfo)} does not support reading '{format}' format.");
+            }
+
+            using JsonDocument document = JsonDocument.ParseValue(ref reader);
+            return DeserializeVirtualMachineInfo(document.RootElement, options);
+        }
+
+        internal static VirtualMachineInfo DeserializeVirtualMachineInfo(JsonElement element, ModelReaderWriterOptions options = null)
+        {
+            options ??= ModelSerializationExtensions.WireOptions;
+
+            if (element.ValueKind == JsonValueKind.Null)
+            {
+                return null;
+            }
+            ImageReference imageReference = default;
+            string scaleSetVmResourceId = default;
+            IDictionary<string, BinaryData> serializedAdditionalRawData = default;
+            Dictionary<string, BinaryData> rawDataDictionary = new Dictionary<string, BinaryData>();
+            foreach (var property in element.EnumerateObject())
+            {
+                if (property.NameEquals("imageReference"u8))
+                {
+                    if (property.Value.ValueKind == JsonValueKind.Null)
+                    {
+                        continue;
+                    }
+                    imageReference = ImageReference.DeserializeImageReference(property.Value, options);
+                    continue;
+                }
+                if (property.NameEquals("scaleSetVmResourceId"u8))
+                {
+                    scaleSetVmResourceId = property.Value.GetString();
+                    continue;
+                }
+                if (options.Format != "W")
+                {
+                    rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText()));
+                }
+            }
+            serializedAdditionalRawData = rawDataDictionary;
+            return new VirtualMachineInfo(imageReference, scaleSetVmResourceId, serializedAdditionalRawData);
+        }
+
+        BinaryData IPersistableModel<VirtualMachineInfo>.Write(ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ? ((IPersistableModel<VirtualMachineInfo>)this).GetFormatFromOptions(options) : options.Format;
+
+            switch (format)
+            {
+                case "J":
+                    return ModelReaderWriter.Write(this, options);
+                default:
+                    throw new FormatException($"The model {nameof(VirtualMachineInfo)} does not support writing '{options.Format}' format.");
+            }
+        }
+
+        VirtualMachineInfo IPersistableModel<VirtualMachineInfo>.Create(BinaryData data, ModelReaderWriterOptions options)
+        {
+            var format = options.Format == "W" ? ((IPersistableModel<VirtualMachineInfo>)this).GetFormatFromOptions(options) : options.Format;
+
+            switch (format)
+            {
+                case "J":
+                    {
+                        using JsonDocument document = JsonDocument.Parse(data);
+                        return DeserializeVirtualMachineInfo(document.RootElement, options);
+                    }
+                default:
+                    throw new FormatException($"The model {nameof(VirtualMachineInfo)} does not support reading '{options.Format}' format.");
+            }
+        }
+
+        string IPersistableModel<VirtualMachineInfo>.GetFormatFromOptions(ModelReaderWriterOptions options) => "J";
+
+        /// <summary> Deserializes the model from a raw response. </summary>
+        /// <param name="response"> The response to deserialize the model from. </param>
+        internal static VirtualMachineInfo FromResponse(Response response)
+        {
+            using var document = JsonDocument.Parse(response.Content);
+            return DeserializeVirtualMachineInfo(document.RootElement);
+        }
+
+        /// <summary> Convert into a <see cref="RequestContent"/>. </summary>
+        internal virtual RequestContent ToRequestContent()
+        {
+            var content = new Utf8JsonRequestContent();
+            content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions);
+            return content;
+        }
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/VirtualMachineInfo.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/VirtualMachineInfo.cs
new file mode 100644
index 0000000000000..7df368ae5ef7f
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/src/Generated/VirtualMachineInfo.cs
@@ -0,0 +1,69 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
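Illustrative round-trip sketch (not part of the generated sources): because VirtualMachineInfo implements IJsonModel<T>/IPersistableModel<T> as shown above, it can be read and written through System.ClientModel's ModelReaderWriter; the JSON payload here is hypothetical.

    using System.ClientModel.Primitives;

    BinaryData json = BinaryData.FromString("{\"scaleSetVmResourceId\": \"myScaleSetVmId\"}");
    VirtualMachineInfo info = ModelReaderWriter.Read<VirtualMachineInfo>(json);          // dispatches to DeserializeVirtualMachineInfo via IPersistableModel<T>.Create
    BinaryData roundTripped = ModelReaderWriter.Write(info, ModelReaderWriterOptions.Json);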
+ +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Info about the current state of the virtual machine. + public partial class VirtualMachineInfo + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + internal VirtualMachineInfo() + { + } + + /// Initializes a new instance of . + /// The reference to the Azure Virtual Machine's Marketplace Image. + /// The resource ID of the Compute Node's current Virtual Machine Scale Set VM. Only defined if the Batch Account was created with its poolAllocationMode property set to 'UserSubscription'. + /// Keeps track of any properties unknown to the library. + internal VirtualMachineInfo(ImageReference imageReference, string scaleSetVmResourceId, IDictionary serializedAdditionalRawData) + { + ImageReference = imageReference; + ScaleSetVmResourceId = scaleSetVmResourceId; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The reference to the Azure Virtual Machine's Marketplace Image. + public ImageReference ImageReference { get; } + /// The resource ID of the Compute Node's current Virtual Machine Scale Set VM. Only defined if the Batch Account was created with its poolAllocationMode property set to 'UserSubscription'. + public string ScaleSetVmResourceId { get; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/WindowsConfiguration.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/WindowsConfiguration.Serialization.cs new file mode 100644 index 0000000000000..273afa8eb8aba --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/WindowsConfiguration.Serialization.cs @@ -0,0 +1,142 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class WindowsConfiguration : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(WindowsConfiguration)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(EnableAutomaticUpdates)) + { + writer.WritePropertyName("enableAutomaticUpdates"u8); + writer.WriteBooleanValue(EnableAutomaticUpdates.Value); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + WindowsConfiguration IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(WindowsConfiguration)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeWindowsConfiguration(document.RootElement, options); + } + + internal static WindowsConfiguration DeserializeWindowsConfiguration(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + bool? enableAutomaticUpdates = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("enableAutomaticUpdates"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + enableAutomaticUpdates = property.Value.GetBoolean(); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new WindowsConfiguration(enableAutomaticUpdates, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(WindowsConfiguration)} does not support writing '{options.Format}' format."); + } + } + + WindowsConfiguration IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeWindowsConfiguration(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(WindowsConfiguration)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. 
+ internal static WindowsConfiguration FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeWindowsConfiguration(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/WindowsConfiguration.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/WindowsConfiguration.cs new file mode 100644 index 0000000000000..9798daea6dc0a --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/WindowsConfiguration.cs @@ -0,0 +1,65 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Windows operating system settings to apply to the virtual machine. + public partial class WindowsConfiguration + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public WindowsConfiguration() + { + } + + /// Initializes a new instance of . + /// Whether automatic updates are enabled on the virtual machine. If omitted, the default value is true. + /// Keeps track of any properties unknown to the library. + internal WindowsConfiguration(bool? enableAutomaticUpdates, IDictionary serializedAdditionalRawData) + { + EnableAutomaticUpdates = enableAutomaticUpdates; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// Whether automatic updates are enabled on the virtual machine. If omitted, the default value is true. + public bool? EnableAutomaticUpdates { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/WindowsUserConfiguration.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/WindowsUserConfiguration.Serialization.cs new file mode 100644 index 0000000000000..225d0b67c3d56 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/WindowsUserConfiguration.Serialization.cs @@ -0,0 +1,142 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ClientModel.Primitives; +using System.Collections.Generic; +using System.Text.Json; +using Azure.Core; + +namespace Azure.Compute.Batch +{ + public partial class WindowsUserConfiguration : IUtf8JsonSerializable, IJsonModel + { + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(WindowsUserConfiguration)} does not support writing '{format}' format."); + } + + writer.WriteStartObject(); + if (Optional.IsDefined(LoginMode)) + { + writer.WritePropertyName("loginMode"u8); + writer.WriteStringValue(LoginMode.Value.ToString()); + } + if (options.Format != "W" && _serializedAdditionalRawData != null) + { + foreach (var item in _serializedAdditionalRawData) + { + writer.WritePropertyName(item.Key); +#if NET6_0_OR_GREATER + writer.WriteRawValue(item.Value); +#else + using (JsonDocument document = JsonDocument.Parse(item.Value)) + { + JsonSerializer.Serialize(writer, document.RootElement); + } +#endif + } + } + writer.WriteEndObject(); + } + + WindowsUserConfiguration IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + if (format != "J") + { + throw new FormatException($"The model {nameof(WindowsUserConfiguration)} does not support reading '{format}' format."); + } + + using JsonDocument document = JsonDocument.ParseValue(ref reader); + return DeserializeWindowsUserConfiguration(document.RootElement, options); + } + + internal static WindowsUserConfiguration DeserializeWindowsUserConfiguration(JsonElement element, ModelReaderWriterOptions options = null) + { + options ??= ModelSerializationExtensions.WireOptions; + + if (element.ValueKind == JsonValueKind.Null) + { + return null; + } + LoginMode? loginMode = default; + IDictionary serializedAdditionalRawData = default; + Dictionary rawDataDictionary = new Dictionary(); + foreach (var property in element.EnumerateObject()) + { + if (property.NameEquals("loginMode"u8)) + { + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + loginMode = new LoginMode(property.Value.GetString()); + continue; + } + if (options.Format != "W") + { + rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); + } + } + serializedAdditionalRawData = rawDataDictionary; + return new WindowsUserConfiguration(loginMode, serializedAdditionalRawData); + } + + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + return ModelReaderWriter.Write(this, options); + default: + throw new FormatException($"The model {nameof(WindowsUserConfiguration)} does not support writing '{options.Format}' format."); + } + } + + WindowsUserConfiguration IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + { + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + + switch (format) + { + case "J": + { + using JsonDocument document = JsonDocument.Parse(data); + return DeserializeWindowsUserConfiguration(document.RootElement, options); + } + default: + throw new FormatException($"The model {nameof(WindowsUserConfiguration)} does not support reading '{options.Format}' format."); + } + } + + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + + /// Deserializes the model from a raw response. + /// The response to deserialize the model from. 
+ internal static WindowsUserConfiguration FromResponse(Response response) + { + using var document = JsonDocument.Parse(response.Content); + return DeserializeWindowsUserConfiguration(document.RootElement); + } + + /// Convert into a . + internal virtual RequestContent ToRequestContent() + { + var content = new Utf8JsonRequestContent(); + content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); + return content; + } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/WindowsUserConfiguration.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/WindowsUserConfiguration.cs new file mode 100644 index 0000000000000..1f3173fcb231c --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/WindowsUserConfiguration.cs @@ -0,0 +1,65 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.Collections.Generic; + +namespace Azure.Compute.Batch +{ + /// Properties used to create a user Account on a Windows Compute Node. + public partial class WindowsUserConfiguration + { + /// + /// Keeps track of any properties unknown to the library. + /// + /// To assign an object to the value of this property use . + /// + /// + /// To assign an already formatted json string to this property use . + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromObjectAsJson("foo") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromString("\"foo\"") + /// Creates a payload of "foo". + /// + /// + /// BinaryData.FromObjectAsJson(new { key = "value" }) + /// Creates a payload of { "key": "value" }. + /// + /// + /// BinaryData.FromString("{\"key\": \"value\"}") + /// Creates a payload of { "key": "value" }. + /// + /// + /// + /// + private IDictionary _serializedAdditionalRawData; + + /// Initializes a new instance of . + public WindowsUserConfiguration() + { + } + + /// Initializes a new instance of . + /// The login mode for the user. The default value for VirtualMachineConfiguration Pools is 'batch'. + /// Keeps track of any properties unknown to the library. + internal WindowsUserConfiguration(LoginMode? loginMode, IDictionary serializedAdditionalRawData) + { + LoginMode = loginMode; + _serializedAdditionalRawData = serializedAdditionalRawData; + } + + /// The login mode for the user. The default value for VirtualMachineConfiguration Pools is 'batch'. + public LoginMode? LoginMode { get; set; } + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/GlobalSuppressions.cs b/sdk/batch/Azure.Compute.Batch/src/GlobalSuppressions.cs new file mode 100644 index 0000000000000..4c34569e52ea3 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/GlobalSuppressions.cs @@ -0,0 +1,20 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// This file is used by Code Analysis to maintain SuppressMessage +// attributes that are applied to this project. +// Project-level suppressions either have no target or are given +// a specific target and scoped to a namespace, type, member, etc. 
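+// In the Target values below, "~N:" scopes a suppression to a namespace (and, with
+// Scope = "namespaceanddescendants", everything under it), while "~T:" scopes it to a single type.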
+ +using System.Diagnostics.CodeAnalysis; +using System.Reflection; +using System.Security.Cryptography; + +[assembly: SuppressMessage("Usage", "AZC0030:Use one of the following pre-approved namespace groups (https://azure.github.io/azure-sdk/registered_namespaces.html): Azure.AI, Azure.Analytics, Azure.Communication, Azure.Containers, Azure.Core.Expressions, Azure.Data, Azure.DigitalTwins, Azure.Identity, Azure.IoT, Azure.Learn, Azure.Management, Azure.Media, Azure.Messaging, Azure.MixedReality, Azure.Monitor, Azure.ResourceManager, Azure.Search, Azure.Security, Azure.Storage, Azure.Template, Microsoft.Extensions.Azure", Justification = "", Scope = "namespaceanddescendants", Target = "~N:Azure.Compute.Batch")] +[assembly: SuppressMessage("Usage", "AZC0001:Use one of the following pre-approved namespace groups (https://azure.github.io/azure-sdk/registered_namespaces.html): Azure.AI, Azure.Analytics, Azure.Communication, Azure.Containers, Azure.Core.Expressions, Azure.Data, Azure.DigitalTwins, Azure.Identity, Azure.IoT, Azure.Learn, Azure.Management, Azure.Media, Azure.Messaging, Azure.MixedReality, Azure.Monitor, Azure.ResourceManager, Azure.Search, Azure.Security, Azure.Storage, Azure.Template, Microsoft.Extensions.Azure", Justification = "", Scope = "namespaceanddescendants", Target = "~N:Azure.Compute.Batch")] +[assembly: SuppressMessage("Usage", "AZC0002:DO ensure all service methods, both asynchronous and synchronous, take an optional CancellationToken parameter called cancellationToken.", Justification = "", Scope = "namespaceanddescendants", Target = "~N:Azure.Compute.Batch")] +[assembly: SuppressMessage("Usage", "AZC0012:Avoid single word type names", Justification = "", Scope = "type", Target = "~T:Azure.Compute.Batch.Schedule")] +[assembly: SuppressMessage("Usage", "AZC0001:Use one of the following pre-approved namespace groups (https://azure.github.io/azure-sdk/registered_namespaces.html): Azure.AI, Azure.Analytics, Azure.Communication, Azure.Containers, Azure.Core.Expressions, Azure.Data, Azure.DigitalTwins, Azure.Identity, Azure.IoT, Azure.Learn, Azure.Management, Azure.Media, Azure.Messaging, Azure.MixedReality, Azure.Monitor, Azure.ResourceManager, Azure.Search, Azure.Security, Azure.Storage, Azure.Template, Microsoft.Extensions.Azure", Justification = "", Scope = "namespace", Target = "~N:Azure.Compute.Batch.Models")] +[assembly: SuppressMessage("Usage", "AZC0012:Avoid single word type names", Justification = "", Scope = "type", Target = "~T:Azure.Compute.Batch.Models.Schedule")] +[assembly: SuppressMessage("Usage", "AZC0012:Avoid single word type names", Justification = "", Scope = "type", Target = "~T:Azure.Compute.Batch.Models.Schedule")] +[assembly: SuppressMessage("Usage", "AZC0012:Avoid single word type names", Justification = "", Scope = "type", Target = "~T:Azure.Compute.Batch.Models.Schedule")] diff --git a/sdk/batch/Azure.Compute.Batch/src/Properties/AssemblyInfo.cs b/sdk/batch/Azure.Compute.Batch/src/Properties/AssemblyInfo.cs new file mode 100644 index 0000000000000..5165a039e89da --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Properties/AssemblyInfo.cs @@ -0,0 +1,11 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
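+// The InternalsVisibleTo attribute below exposes this assembly's internal members to the
+// test project; the PublicKey value is the strong-name public key that the
+// Azure.Compute.Batch.Tests assembly is signed with, so the two must stay in sync.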
+
+using System.Runtime.CompilerServices;
+
+[assembly: InternalsVisibleTo("Azure.Compute.Batch.Tests, PublicKey=0024000004800000940000000602000000240000525341310004000001000100d15ddcb29688295338af4b7686603fe614abd555e09efba8fb88ee09e1f7b1ccaeed2e8f823fa9eef3fdd60217fc012ea67d2479751a0b8c087a4185541b851bd8b16f8d91b840e51b1cb0ba6fe647997e57429265e85ef62d565db50a69ae1647d54d7bd855e4db3d8a91510e5bcbd0edfbbecaa20a7bd9ae74593daa7b11b4")]
+
+// Replace Microsoft.Test with the correct resource provider namespace for your service and uncomment.
+// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/azure-services-resource-providers
+// for the list of possible values.
+[assembly: Azure.Core.AzureResourceProviderNamespace("Microsoft.Template")]
diff --git a/sdk/batch/Azure.Compute.Batch/tests/Azure.Compute.Batch.Tests.csproj b/sdk/batch/Azure.Compute.Batch/tests/Azure.Compute.Batch.Tests.csproj
new file mode 100644
index 0000000000000..54309afb2d601
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/tests/Azure.Compute.Batch.Tests.csproj
@@ -0,0 +1,29 @@
+
+ $(RequiredTargetFrameworks)
+
+
+ $(NoWarn);CS1591
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/sdk/batch/Azure.Compute.Batch/tests/Generated/Samples/Samples_BatchClient.cs b/sdk/batch/Azure.Compute.Batch/tests/Generated/Samples/Samples_BatchClient.cs
new file mode 100644
index 0000000000000..60acc2870e928
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/tests/Generated/Samples/Samples_BatchClient.cs
@@ -0,0 +1,20363 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.Collections.Generic;
+using System.Text.Json;
+using System.Threading.Tasks;
+using System.Xml;
+using Azure.Core;
+using Azure.Identity;
+using NUnit.Framework;
+
+namespace Azure.Compute.Batch.Samples
+{
+    public partial class Samples_BatchClient
+    {
+        [Test]
+        [Ignore("Only validating compilation of examples")]
+        public void Example_Batch_GetApplication_ShortVersion()
+        {
+            Uri endpoint = new Uri("");
+            TokenCredential credential = new DefaultAzureCredential();
+            BatchClient client = new BatchClient(endpoint, credential);
+
+            Response response = client.GetApplication("", null, null, null);
+
+            JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement;
+            Console.WriteLine(result.GetProperty("id").ToString());
+            Console.WriteLine(result.GetProperty("displayName").ToString());
+            Console.WriteLine(result.GetProperty("versions")[0].ToString());
+        }
+
+        [Test]
+        [Ignore("Only validating compilation of examples")]
+        public async Task Example_Batch_GetApplication_ShortVersion_Async()
+        {
+            Uri endpoint = new Uri("");
+            TokenCredential credential = new DefaultAzureCredential();
+            BatchClient client = new BatchClient(endpoint, credential);
+
+            Response response = await client.GetApplicationAsync("", null, null, null);
+
+            JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement;
+            Console.WriteLine(result.GetProperty("id").ToString());
+            Console.WriteLine(result.GetProperty("displayName").ToString());
+            Console.WriteLine(result.GetProperty("versions")[0].ToString());
+        }
+
+        [Test]
+        [Ignore("Only validating compilation of examples")]
+        public void Example_Batch_GetApplication_ShortVersion_Convenience()
+        {
+            Uri endpoint = new Uri("");
+            TokenCredential credential = new DefaultAzureCredential();
+            BatchClient client = new BatchClient(endpoint, credential);
+
+            Response response =
client.GetApplication(""); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetApplication_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetApplicationAsync(""); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetApplication_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetApplication("", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("versions")[0].ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetApplication_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetApplicationAsync("", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("versions")[0].ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetApplication_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetApplication("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetApplication_AllParameters_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetApplicationAsync("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_CreatePool_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + id = "", + vmSize = "", + }); + Response response = client.CreatePool(content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_CreatePool_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + id = "", + vmSize = "", + }); + Response response = await 
client.CreatePoolAsync(content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_CreatePool_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchPoolCreateContent pool = new BatchPoolCreateContent("", ""); + Response response = client.CreatePool(pool); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_CreatePool_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchPoolCreateContent pool = new BatchPoolCreateContent("", ""); + Response response = await client.CreatePoolAsync(pool); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_CreatePool_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + id = "", + displayName = "", + vmSize = "", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "", + offer = "", + sku = "", + version = "", + virtualMachineImageId = "", + }, + nodeAgentSKUId = "", + windowsConfiguration = new + { + enableAutomaticUpdates = true, + }, + dataDisks = new object[] + { +new +{ +lun = 1234, +caching = "none", +diskSizeGB = 1234, +storageAccountType = "standard_lrs", +} + }, + licenseType = "", + containerConfiguration = new + { + type = "dockerCompatible", + containerImageNames = new object[] + { +"" + }, + containerRegistries = new object[] + { +new +{ +username = "", +password = "", +registryServer = "", +identityReference = new +{ +resourceId = "", +}, +} + }, + }, + diskEncryptionConfiguration = new + { + targets = new object[] + { +"osdisk" + }, + }, + nodePlacementConfiguration = new + { + policy = "regional", + }, + extensions = new object[] + { +new +{ +name = "", +publisher = "", +type = "", +typeHandlerVersion = "", +autoUpgradeMinorVersion = true, +enableAutomaticUpgrade = true, +settings = new +{ +key = "", +}, +protectedSettings = new +{ +key = "", +}, +provisionAfterExtensions = new object[] +{ +"" +}, +} + }, + osDisk = new + { + ephemeralOSDiskSettings = new + { + placement = "cachedisk", + }, + caching = "none", + diskSizeGB = 1234, + managedDisk = new + { + storageAccountType = "standard_lrs", + }, + writeAcceleratorEnabled = true, + }, + securityProfile = new + { + encryptionAtHost = true, + securityType = "trustedLaunch", + uefiSettings = new + { + secureBootEnabled = true, + vTpmEnabled = true, + }, + }, + serviceArtifactReference = new + { + id = "", + }, + }, + resizeTimeout = "PT1H23M45S", + resourceTags = new + { + key = "", + }, + targetDedicatedNodes = 1234, + targetLowPriorityNodes = 1234, + enableAutoScale = true, + autoScaleFormula = "", + autoScaleEvaluationInterval = "PT1H23M45S", + enableInterNodeCommunication = true, + networkConfiguration = new + { + subnetId = "", + dynamicVNetAssignmentScope = "none", + endpointConfiguration = new + { + inboundNATPools = new object[] + { +new +{ +name = "", +protocol = "tcp", +backendPort = 1234, +frontendPortRangeStart = 1234, +frontendPortRangeEnd = 1234, +networkSecurityGroupRules = new object[] +{ +new +{ +priority = 
1234, +access = "allow", +sourceAddressPrefix = "", +sourcePortRanges = new object[] +{ +"" +}, +} +}, +} + }, + }, + publicIPAddressConfiguration = new + { + provision = "batchmanaged", + ipAddressIds = new object[] + { +"" + }, + }, + enableAcceleratedNetworking = true, + }, + startTask = new + { + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { +new +{ +autoStorageContainerName = "", +storageContainerUrl = "", +httpUrl = "", +blobPrefix = "", +filePath = "", +fileMode = "", +} + }, + environmentSettings = new object[] + { +new +{ +name = "", +value = "", +} + }, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { +new +{ +applicationId = "", +version = "", +} + }, + taskSlotsPerNode = 1234, + taskSchedulingPolicy = new + { + nodeFillType = "spread", + }, + userAccounts = new object[] + { +new +{ +name = "", +password = "", +elevationLevel = "nonadmin", +linuxUserConfiguration = new +{ +uid = 1234, +gid = 1234, +sshPrivateKey = "", +}, +windowsUserConfiguration = new +{ +loginMode = "batch", +}, +} + }, + metadata = new object[] + { +new +{ +name = "", +value = "", +} + }, + mountConfiguration = new object[] + { +new +{ +azureBlobFileSystemConfiguration = new +{ +accountName = "", +containerName = "", +accountKey = "", +sasKey = "", +blobfuseOptions = "", +relativeMountPath = "", +}, +nfsMountConfiguration = new +{ +source = "", +relativeMountPath = "", +mountOptions = "", +}, +cifsMountConfiguration = new +{ +username = "", +source = "", +relativeMountPath = "", +mountOptions = "", +password = "", +}, +azureFileShareConfiguration = new +{ +accountName = "", +azureFileUrl = "", +accountKey = "", +relativeMountPath = "", +mountOptions = "", +}, +} + }, + targetNodeCommunicationMode = "default", + upgradePolicy = new + { + mode = "automatic", + automaticOSUpgradePolicy = new + { + disableAutomaticRollback = true, + enableAutomaticOSUpgrade = true, + useRollingUpgradePolicy = true, + osRollingUpgradeDeferral = true, + }, + rollingUpgradePolicy = new + { + enableCrossZoneUpgrade = true, + maxBatchInstancePercent = 1234, + maxUnhealthyInstancePercent = 1234, + maxUnhealthyUpgradedInstancePercent = 1234, + pauseTimeBetweenBatches = "PT1H23M45S", + prioritizeUnhealthyInstances = true, + rollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }); + Response response = client.CreatePool(content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_CreatePool_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + id = "", + displayName = "", + vmSize = "", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "", + offer = "", + sku = "", + version = "", + virtualMachineImageId = "", + }, + nodeAgentSKUId = "", + windowsConfiguration = new + { + enableAutomaticUpdates = true, + }, + dataDisks = new object[] + { +new +{ +lun = 1234, +caching = "none", +diskSizeGB = 1234, +storageAccountType = 
"standard_lrs", +} + }, + licenseType = "", + containerConfiguration = new + { + type = "dockerCompatible", + containerImageNames = new object[] + { +"" + }, + containerRegistries = new object[] + { +new +{ +username = "", +password = "", +registryServer = "", +identityReference = new +{ +resourceId = "", +}, +} + }, + }, + diskEncryptionConfiguration = new + { + targets = new object[] + { +"osdisk" + }, + }, + nodePlacementConfiguration = new + { + policy = "regional", + }, + extensions = new object[] + { +new +{ +name = "", +publisher = "", +type = "", +typeHandlerVersion = "", +autoUpgradeMinorVersion = true, +enableAutomaticUpgrade = true, +settings = new +{ +key = "", +}, +protectedSettings = new +{ +key = "", +}, +provisionAfterExtensions = new object[] +{ +"" +}, +} + }, + osDisk = new + { + ephemeralOSDiskSettings = new + { + placement = "cachedisk", + }, + caching = "none", + diskSizeGB = 1234, + managedDisk = new + { + storageAccountType = "standard_lrs", + }, + writeAcceleratorEnabled = true, + }, + securityProfile = new + { + encryptionAtHost = true, + securityType = "trustedLaunch", + uefiSettings = new + { + secureBootEnabled = true, + vTpmEnabled = true, + }, + }, + serviceArtifactReference = new + { + id = "", + }, + }, + resizeTimeout = "PT1H23M45S", + resourceTags = new + { + key = "", + }, + targetDedicatedNodes = 1234, + targetLowPriorityNodes = 1234, + enableAutoScale = true, + autoScaleFormula = "", + autoScaleEvaluationInterval = "PT1H23M45S", + enableInterNodeCommunication = true, + networkConfiguration = new + { + subnetId = "", + dynamicVNetAssignmentScope = "none", + endpointConfiguration = new + { + inboundNATPools = new object[] + { +new +{ +name = "", +protocol = "tcp", +backendPort = 1234, +frontendPortRangeStart = 1234, +frontendPortRangeEnd = 1234, +networkSecurityGroupRules = new object[] +{ +new +{ +priority = 1234, +access = "allow", +sourceAddressPrefix = "", +sourcePortRanges = new object[] +{ +"" +}, +} +}, +} + }, + }, + publicIPAddressConfiguration = new + { + provision = "batchmanaged", + ipAddressIds = new object[] + { +"" + }, + }, + enableAcceleratedNetworking = true, + }, + startTask = new + { + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { +new +{ +autoStorageContainerName = "", +storageContainerUrl = "", +httpUrl = "", +blobPrefix = "", +filePath = "", +fileMode = "", +} + }, + environmentSettings = new object[] + { +new +{ +name = "", +value = "", +} + }, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { +new +{ +applicationId = "", +version = "", +} + }, + taskSlotsPerNode = 1234, + taskSchedulingPolicy = new + { + nodeFillType = "spread", + }, + userAccounts = new object[] + { +new +{ +name = "", +password = "", +elevationLevel = "nonadmin", +linuxUserConfiguration = new +{ +uid = 1234, +gid = 1234, +sshPrivateKey = "", +}, +windowsUserConfiguration = new +{ +loginMode = "batch", +}, +} + }, + metadata = new object[] + { +new +{ +name = "", +value = "", +} + }, + mountConfiguration = new object[] + { +new +{ +azureBlobFileSystemConfiguration = new +{ +accountName = "", +containerName = "", +accountKey = "", +sasKey = "", +blobfuseOptions = "", +relativeMountPath = "", +}, +nfsMountConfiguration = new +{ +source = "", 
+relativeMountPath = "", +mountOptions = "", +}, +cifsMountConfiguration = new +{ +username = "", +source = "", +relativeMountPath = "", +mountOptions = "", +password = "", +}, +azureFileShareConfiguration = new +{ +accountName = "", +azureFileUrl = "", +accountKey = "", +relativeMountPath = "", +mountOptions = "", +}, +} + }, + targetNodeCommunicationMode = "default", + upgradePolicy = new + { + mode = "automatic", + automaticOSUpgradePolicy = new + { + disableAutomaticRollback = true, + enableAutomaticOSUpgrade = true, + useRollingUpgradePolicy = true, + osRollingUpgradeDeferral = true, + }, + rollingUpgradePolicy = new + { + enableCrossZoneUpgrade = true, + maxBatchInstancePercent = 1234, + maxUnhealthyInstancePercent = 1234, + maxUnhealthyUpgradedInstancePercent = 1234, + pauseTimeBetweenBatches = "PT1H23M45S", + prioritizeUnhealthyInstances = true, + rollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }); + Response response = await client.CreatePoolAsync(content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_CreatePool_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchPoolCreateContent pool = new BatchPoolCreateContent("", "") + { + DisplayName = "", + VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + { + Publisher = "", + Offer = "", + Sku = "", + Version = "", + VirtualMachineImageId = "", + }, "") + { + WindowsConfiguration = new WindowsConfiguration + { + EnableAutomaticUpdates = true, + }, + DataDisks = {new DataDisk(1234, 1234) +{ +Caching = CachingType.None, +StorageAccountType = StorageAccountType.StandardLRS, +}}, + LicenseType = "", + ContainerConfiguration = new ContainerConfiguration(ContainerType.DockerCompatible) + { + ContainerImageNames = { "" }, + ContainerRegistries = {new ContainerRegistryReference +{ +Username = "", +Password = "", +RegistryServer = "", +IdentityReference = new BatchNodeIdentityReference +{ +ResourceId = "", +}, +}}, + }, + DiskEncryptionConfiguration = new DiskEncryptionConfiguration + { + Targets = { DiskEncryptionTarget.OsDisk }, + }, + NodePlacementConfiguration = new BatchNodePlacementConfiguration + { + Policy = BatchNodePlacementPolicyType.Regional, + }, + Extensions = {new VMExtension("", "", "") +{ +TypeHandlerVersion = "", +AutoUpgradeMinorVersion = true, +EnableAutomaticUpgrade = true, +Settings = +{ +["key"] = "" +}, +ProtectedSettings = +{ +["key"] = "" +}, +ProvisionAfterExtensions = {""}, +}}, + OsDisk = new OSDisk + { + EphemeralOSDiskSettings = new DiffDiskSettings + { + Placement = DiffDiskPlacement.CacheDisk, + }, + Caching = CachingType.None, + DiskSizeGB = 1234, + ManagedDisk = new ManagedDisk(StorageAccountType.StandardLRS), + WriteAcceleratorEnabled = true, + }, + SecurityProfile = new SecurityProfile(true, SecurityTypes.TrustedLaunch, new UefiSettings + { + SecureBootEnabled = true, + VTpmEnabled = true, + }), + ServiceArtifactReference = new ServiceArtifactReference(""), + }, + ResizeTimeout = XmlConvert.ToTimeSpan("PT1H23M45S"), + ResourceTags = +{ +["key"] = "" +}, + TargetDedicatedNodes = 1234, + TargetLowPriorityNodes = 1234, + EnableAutoScale = true, + AutoScaleFormula = "", + AutoScaleEvaluationInterval = XmlConvert.ToTimeSpan("PT1H23M45S"), + 
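+                // XmlConvert.ToTimeSpan (used above for ResizeTimeout and AutoScaleEvaluationInterval)
+                // parses ISO 8601 durations: "PT1H23M45S" is 1 hour, 23 minutes, 45 seconds.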
EnableInterNodeCommunication = true, + NetworkConfiguration = new NetworkConfiguration + { + SubnetId = "", + DynamicVNetAssignmentScope = DynamicVNetAssignmentScope.None, + EndpointConfiguration = new BatchPoolEndpointConfiguration(new InboundNatPool[] + { +new InboundNatPool("", InboundEndpointProtocol.Tcp, 1234, 1234, 1234) +{ +NetworkSecurityGroupRules = {new NetworkSecurityGroupRule(1234, NetworkSecurityGroupRuleAccess.Allow, "") +{ +SourcePortRanges = {""}, +}}, +} + }), + PublicIpAddressConfiguration = new PublicIpAddressConfiguration + { + IpAddressProvisioningType = IpAddressProvisioningType.BatchManaged, + IpAddressIds = { "" }, + }, + EnableAcceleratedNetworking = true, + }, + StartTask = new BatchStartTask("") + { + ContainerSettings = new BatchTaskContainerSettings("") + { + ContainerRunOptions = "", + Registry = default, + WorkingDirectory = ContainerWorkingDirectory.TaskWorkingDirectory, + }, + ResourceFiles = {new ResourceFile +{ +AutoStorageContainerName = "", +StorageContainerUrl = "", +HttpUrl = "", +BlobPrefix = "", +FilePath = "", +FileMode = "", +IdentityReference = default, +}}, + EnvironmentSettings = {new EnvironmentSetting("") +{ +Value = "", +}}, + UserIdentity = new UserIdentity + { + Username = "", + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, + MaxTaskRetryCount = 1234, + WaitForSuccess = true, + }, + ApplicationPackageReferences = {new BatchApplicationPackageReference("") +{ +Version = "", +}}, + TaskSlotsPerNode = 1234, + TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), + UserAccounts = {new UserAccount("", "") +{ +ElevationLevel = ElevationLevel.NonAdmin, +LinuxUserConfiguration = new LinuxUserConfiguration +{ +Uid = 1234, +Gid = 1234, +SshPrivateKey = "", +}, +WindowsUserConfiguration = new WindowsUserConfiguration +{ +LoginMode = LoginMode.Batch, +}, +}}, + Metadata = { new MetadataItem("", "") }, + MountConfiguration = {new MountConfiguration +{ +AzureBlobFileSystemConfiguration = new AzureBlobFileSystemConfiguration("", "", "") +{ +AccountKey = "", +SasKey = "", +BlobfuseOptions = "", +IdentityReference = default, +}, +NfsMountConfiguration = new NfsMountConfiguration("", "") +{ +MountOptions = "", +}, +CifsMountConfiguration = new CifsMountConfiguration("", "", "", "") +{ +MountOptions = "", +}, +AzureFileShareConfiguration = new AzureFileShareConfiguration("", "", "", "") +{ +MountOptions = "", +}, +}}, + TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, + UpgradePolicy = new UpgradePolicy(UpgradeMode.Automatic) + { + AutomaticOsUpgradePolicy = new AutomaticOsUpgradePolicy + { + DisableAutomaticRollback = true, + EnableAutomaticOsUpgrade = true, + UseRollingUpgradePolicy = true, + OsRollingUpgradeDeferral = true, + }, + RollingUpgradePolicy = new RollingUpgradePolicy + { + EnableCrossZoneUpgrade = true, + MaxBatchInstancePercent = 1234, + MaxUnhealthyInstancePercent = 1234, + MaxUnhealthyUpgradedInstancePercent = 1234, + PauseTimeBetweenBatches = XmlConvert.ToTimeSpan("PT1H23M45S"), + PrioritizeUnhealthyInstances = true, + RollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }; + Response response = client.CreatePool(pool, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_CreatePool_AllParameters_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = 
new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchPoolCreateContent pool = new BatchPoolCreateContent("", "") + { + DisplayName = "", + VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + { + Publisher = "", + Offer = "", + Sku = "", + Version = "", + VirtualMachineImageId = "", + }, "") + { + WindowsConfiguration = new WindowsConfiguration + { + EnableAutomaticUpdates = true, + }, + DataDisks = {new DataDisk(1234, 1234) +{ +Caching = CachingType.None, +StorageAccountType = StorageAccountType.StandardLRS, +}}, + LicenseType = "", + ContainerConfiguration = new ContainerConfiguration(ContainerType.DockerCompatible) + { + ContainerImageNames = { "" }, + ContainerRegistries = {new ContainerRegistryReference +{ +Username = "", +Password = "", +RegistryServer = "", +IdentityReference = new BatchNodeIdentityReference +{ +ResourceId = "", +}, +}}, + }, + DiskEncryptionConfiguration = new DiskEncryptionConfiguration + { + Targets = { DiskEncryptionTarget.OsDisk }, + }, + NodePlacementConfiguration = new BatchNodePlacementConfiguration + { + Policy = BatchNodePlacementPolicyType.Regional, + }, + Extensions = {new VMExtension("", "", "") +{ +TypeHandlerVersion = "", +AutoUpgradeMinorVersion = true, +EnableAutomaticUpgrade = true, +Settings = +{ +["key"] = "" +}, +ProtectedSettings = +{ +["key"] = "" +}, +ProvisionAfterExtensions = {""}, +}}, + OsDisk = new OSDisk + { + EphemeralOSDiskSettings = new DiffDiskSettings + { + Placement = DiffDiskPlacement.CacheDisk, + }, + Caching = CachingType.None, + DiskSizeGB = 1234, + ManagedDisk = new ManagedDisk(StorageAccountType.StandardLRS), + WriteAcceleratorEnabled = true, + }, + SecurityProfile = new SecurityProfile(true, SecurityTypes.TrustedLaunch, new UefiSettings + { + SecureBootEnabled = true, + VTpmEnabled = true, + }), + ServiceArtifactReference = new ServiceArtifactReference(""), + }, + ResizeTimeout = XmlConvert.ToTimeSpan("PT1H23M45S"), + ResourceTags = +{ +["key"] = "" +}, + TargetDedicatedNodes = 1234, + TargetLowPriorityNodes = 1234, + EnableAutoScale = true, + AutoScaleFormula = "", + AutoScaleEvaluationInterval = XmlConvert.ToTimeSpan("PT1H23M45S"), + EnableInterNodeCommunication = true, + NetworkConfiguration = new NetworkConfiguration + { + SubnetId = "", + DynamicVNetAssignmentScope = DynamicVNetAssignmentScope.None, + EndpointConfiguration = new BatchPoolEndpointConfiguration(new InboundNatPool[] + { +new InboundNatPool("", InboundEndpointProtocol.Tcp, 1234, 1234, 1234) +{ +NetworkSecurityGroupRules = {new NetworkSecurityGroupRule(1234, NetworkSecurityGroupRuleAccess.Allow, "") +{ +SourcePortRanges = {""}, +}}, +} + }), + PublicIpAddressConfiguration = new PublicIpAddressConfiguration + { + IpAddressProvisioningType = IpAddressProvisioningType.BatchManaged, + IpAddressIds = { "" }, + }, + EnableAcceleratedNetworking = true, + }, + StartTask = new BatchStartTask("") + { + ContainerSettings = new BatchTaskContainerSettings("") + { + ContainerRunOptions = "", + Registry = default, + WorkingDirectory = ContainerWorkingDirectory.TaskWorkingDirectory, + }, + ResourceFiles = {new ResourceFile +{ +AutoStorageContainerName = "", +StorageContainerUrl = "", +HttpUrl = "", +BlobPrefix = "", +FilePath = "", +FileMode = "", +IdentityReference = default, +}}, + EnvironmentSettings = {new EnvironmentSetting("") +{ +Value = "", +}}, + UserIdentity = new UserIdentity + { + Username = "", + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel 
= ElevationLevel.NonAdmin, + }, + }, + MaxTaskRetryCount = 1234, + WaitForSuccess = true, + }, + ApplicationPackageReferences = {new BatchApplicationPackageReference("") +{ +Version = "", +}}, + TaskSlotsPerNode = 1234, + TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), + UserAccounts = {new UserAccount("", "") +{ +ElevationLevel = ElevationLevel.NonAdmin, +LinuxUserConfiguration = new LinuxUserConfiguration +{ +Uid = 1234, +Gid = 1234, +SshPrivateKey = "", +}, +WindowsUserConfiguration = new WindowsUserConfiguration +{ +LoginMode = LoginMode.Batch, +}, +}}, + Metadata = { new MetadataItem("", "") }, + MountConfiguration = {new MountConfiguration +{ +AzureBlobFileSystemConfiguration = new AzureBlobFileSystemConfiguration("", "", "") +{ +AccountKey = "", +SasKey = "", +BlobfuseOptions = "", +IdentityReference = default, +}, +NfsMountConfiguration = new NfsMountConfiguration("", "") +{ +MountOptions = "", +}, +CifsMountConfiguration = new CifsMountConfiguration("", "", "", "") +{ +MountOptions = "", +}, +AzureFileShareConfiguration = new AzureFileShareConfiguration("", "", "", "") +{ +MountOptions = "", +}, +}}, + TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, + UpgradePolicy = new UpgradePolicy(UpgradeMode.Automatic) + { + AutomaticOsUpgradePolicy = new AutomaticOsUpgradePolicy + { + DisableAutomaticRollback = true, + EnableAutomaticOsUpgrade = true, + UseRollingUpgradePolicy = true, + OsRollingUpgradeDeferral = true, + }, + RollingUpgradePolicy = new RollingUpgradePolicy + { + EnableCrossZoneUpgrade = true, + MaxBatchInstancePercent = 1234, + MaxUnhealthyInstancePercent = 1234, + MaxUnhealthyUpgradedInstancePercent = 1234, + PauseTimeBetweenBatches = XmlConvert.ToTimeSpan("PT1H23M45S"), + PrioritizeUnhealthyInstances = true, + RollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }; + Response response = await client.CreatePoolAsync(pool, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_DeletePool_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.DeletePool(""); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_DeletePool_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.DeletePoolAsync(""); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_DeletePool_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.DeletePool("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_DeletePool_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + 
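+            // requestConditions (null below) can carry ETag-based conditions such as If-Match,
+            // so the delete only proceeds if the pool is unchanged since it was last read.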
Response response = await client.DeletePoolAsync("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetPool_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetPool("", null, null, null, null, null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetPool_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetPoolAsync("", null, null, null, null, null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetPool_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetPool(""); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetPool_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetPoolAsync(""); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetPool_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetPool("", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), new string[] { "" }, new string[] { "" }, null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("eTag").ToString()); + Console.WriteLine(result.GetProperty("lastModified").ToString()); + Console.WriteLine(result.GetProperty("creationTime").ToString()); + Console.WriteLine(result.GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("stateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("allocationState").ToString()); + Console.WriteLine(result.GetProperty("allocationStateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("vmSize").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("publisher").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("offer").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("sku").ToString()); + 
Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("virtualMachineImageId").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("exactVersion").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("nodeAgentSKUId").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("windowsConfiguration").GetProperty("enableAutomaticUpdates").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("lun").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("caching").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("diskSizeGB").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("storageAccountType").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("licenseType").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("type").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerImageNames")[0].ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("diskEncryptionConfiguration").GetProperty("targets")[0].ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("nodePlacementConfiguration").GetProperty("policy").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("publisher").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("type").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("typeHandlerVersion").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("autoUpgradeMinorVersion").ToString()); + 
Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("enableAutomaticUpgrade").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("settings").GetProperty("").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("protectedSettings").GetProperty("").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("provisionAfterExtensions")[0].ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("ephemeralOSDiskSettings").GetProperty("placement").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("caching").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("diskSizeGB").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("managedDisk").GetProperty("storageAccountType").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("writeAcceleratorEnabled").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("encryptionAtHost").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("securityType").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("uefiSettings").GetProperty("secureBootEnabled").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("uefiSettings").GetProperty("vTpmEnabled").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("serviceArtifactReference").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("resizeTimeout").ToString()); + Console.WriteLine(result.GetProperty("resizeErrors")[0].GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("resizeErrors")[0].GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("resizeErrors")[0].GetProperty("values")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("resizeErrors")[0].GetProperty("values")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("resourceTags").GetProperty("").ToString()); + Console.WriteLine(result.GetProperty("currentDedicatedNodes").ToString()); + Console.WriteLine(result.GetProperty("currentLowPriorityNodes").ToString()); + Console.WriteLine(result.GetProperty("targetDedicatedNodes").ToString()); + Console.WriteLine(result.GetProperty("targetLowPriorityNodes").ToString()); + Console.WriteLine(result.GetProperty("enableAutoScale").ToString()); + Console.WriteLine(result.GetProperty("autoScaleFormula").ToString()); + Console.WriteLine(result.GetProperty("autoScaleEvaluationInterval").ToString()); + Console.WriteLine(result.GetProperty("autoScaleRun").GetProperty("timestamp").ToString()); + Console.WriteLine(result.GetProperty("autoScaleRun").GetProperty("results").ToString()); + Console.WriteLine(result.GetProperty("autoScaleRun").GetProperty("error").GetProperty("code").ToString()); + 
Console.WriteLine(result.GetProperty("autoScaleRun").GetProperty("error").GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("autoScaleRun").GetProperty("error").GetProperty("values")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("autoScaleRun").GetProperty("error").GetProperty("values")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("enableInterNodeCommunication").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("subnetId").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("dynamicVNetAssignmentScope").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("protocol").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("backendPort").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("frontendPortRangeStart").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("frontendPortRangeEnd").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("priority").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("access").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("sourceAddressPrefix").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("sourcePortRanges")[0].ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("publicIPAddressConfiguration").GetProperty("provision").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("publicIPAddressConfiguration").GetProperty("ipAddressIds")[0].ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("enableAcceleratedNetworking").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + 
Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("waitForSuccess").ToString()); + Console.WriteLine(result.GetProperty("applicationPackageReferences")[0].GetProperty("applicationId").ToString()); + Console.WriteLine(result.GetProperty("applicationPackageReferences")[0].GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("taskSlotsPerNode").ToString()); + Console.WriteLine(result.GetProperty("taskSchedulingPolicy").GetProperty("nodeFillType").ToString()); + Console.WriteLine(result.GetProperty("userAccounts")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("userAccounts")[0].GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("userAccounts")[0].GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("uid").ToString()); + Console.WriteLine(result.GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("gid").ToString()); + Console.WriteLine(result.GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("sshPrivateKey").ToString()); + Console.WriteLine(result.GetProperty("userAccounts")[0].GetProperty("windowsUserConfiguration").GetProperty("loginMode").ToString()); + Console.WriteLine(result.GetProperty("metadata")[0].GetProperty("name").ToString()); + 
Console.WriteLine(result.GetProperty("metadata")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("lastUpdateTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("usageStats").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("usageStats").GetProperty("lastUpdateTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("usageStats").GetProperty("dedicatedCoreTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("lastUpdateTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("avgCPUPercentage").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("avgMemoryGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("peakMemoryGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("avgDiskGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("peakDiskGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("diskReadIOps").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("diskWriteIOps").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("diskReadGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("diskWriteGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("networkReadGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("networkWriteGiB").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("accountName").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("containerName").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("accountKey").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("sasKey").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("blobfuseOptions").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("source").ToString()); + 
Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("mountOptions").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("source").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("mountOptions").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("accountName").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("azureFileUrl").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("accountKey").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("mountOptions").ToString()); + Console.WriteLine(result.GetProperty("identity").GetProperty("type").ToString()); + Console.WriteLine(result.GetProperty("identity").GetProperty("userAssignedIdentities")[0].GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("identity").GetProperty("userAssignedIdentities")[0].GetProperty("clientId").ToString()); + Console.WriteLine(result.GetProperty("identity").GetProperty("userAssignedIdentities")[0].GetProperty("principalId").ToString()); + Console.WriteLine(result.GetProperty("targetNodeCommunicationMode").ToString()); + Console.WriteLine(result.GetProperty("currentNodeCommunicationMode").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("mode").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("disableAutomaticRollback").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("enableAutomaticOSUpgrade").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("useRollingUpgradePolicy").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("osRollingUpgradeDeferral").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("enableCrossZoneUpgrade").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxBatchInstancePercent").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxUnhealthyInstancePercent").ToString()); + 
Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxUnhealthyUpgradedInstancePercent").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("pauseTimeBetweenBatches").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("prioritizeUnhealthyInstances").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("rollbackFailedInstancesOnPolicyBreach").ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetPool_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetPool("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), select: new string[] { "" }, expand: new string[] { "" }, requestConditions: null); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_UpdatePool_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new object()); + Response response = client.UpdatePool("", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_UpdatePool_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new object()); + Response response = await client.UpdatePoolAsync("", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_UpdatePool_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + startTask = new + { + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + registry = new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + }, + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { +new +{ +autoStorageContainerName = "", +storageContainerUrl = "", +httpUrl = "", +blobPrefix = "", +filePath = "", +fileMode = "", +} + }, + environmentSettings = new object[] + { +new +{ +name = "", +value = "", +} + }, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { +new +{ +applicationId = "", +version = "", +} + }, + metadata = new object[] + { +new +{ +name = "", +value = "", +} + }, + targetNodeCommunicationMode = "default", + }); + Response response = client.UpdatePool("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + 
[Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_UpdatePool_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + startTask = new + { + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + registry = new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + }, + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { +new +{ +autoStorageContainerName = "", +storageContainerUrl = "", +httpUrl = "", +blobPrefix = "", +filePath = "", +fileMode = "", +} + }, + environmentSettings = new object[] + { +new +{ +name = "", +value = "", +} + }, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { +new +{ +applicationId = "", +version = "", +} + }, + metadata = new object[] + { +new +{ +name = "", +value = "", +} + }, + targetNodeCommunicationMode = "default", + }); + Response response = await client.UpdatePoolAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_DisablePoolAutoScale_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.DisablePoolAutoScale(""); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_DisablePoolAutoScale_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.DisablePoolAutoScaleAsync(""); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_DisablePoolAutoScale_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.DisablePoolAutoScale("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_DisablePoolAutoScale_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.DisablePoolAutoScaleAsync("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_EnablePoolAutoScale_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = 
new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new object()); + Response response = client.EnablePoolAutoScale("", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_EnablePoolAutoScale_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new object()); + Response response = await client.EnablePoolAutoScaleAsync("", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_EnablePoolAutoScale_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchPoolEnableAutoScaleContent content = new BatchPoolEnableAutoScaleContent(); + Response response = client.EnablePoolAutoScale("", content); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_EnablePoolAutoScale_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchPoolEnableAutoScaleContent content = new BatchPoolEnableAutoScaleContent(); + Response response = await client.EnablePoolAutoScaleAsync("", content); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_EnablePoolAutoScale_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + autoScaleFormula = "", + autoScaleEvaluationInterval = "PT1H23M45S", + }); + Response response = client.EnablePoolAutoScale("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_EnablePoolAutoScale_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + autoScaleFormula = "", + autoScaleEvaluationInterval = "PT1H23M45S", + }); + Response response = await client.EnablePoolAutoScaleAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_EnablePoolAutoScale_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchPoolEnableAutoScaleContent content = new BatchPoolEnableAutoScaleContent + { + AutoScaleFormula = "", + AutoScaleEvaluationInterval = XmlConvert.ToTimeSpan("PT1H23M45S"), + }; + Response response = client.EnablePoolAutoScale("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 
May 2022 18:57:31 GMT"), requestConditions: null); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_EnablePoolAutoScale_AllParameters_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchPoolEnableAutoScaleContent content = new BatchPoolEnableAutoScaleContent + { + AutoScaleFormula = "", + AutoScaleEvaluationInterval = XmlConvert.ToTimeSpan("PT1H23M45S"), + }; + Response response = await client.EnablePoolAutoScaleAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_EvaluatePoolAutoScale_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + autoScaleFormula = "", + }); + Response response = client.EvaluatePoolAutoScale("", content); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.GetProperty("timestamp").ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_EvaluatePoolAutoScale_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + autoScaleFormula = "", + }); + Response response = await client.EvaluatePoolAutoScaleAsync("", content); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.GetProperty("timestamp").ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_EvaluatePoolAutoScale_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchPoolEvaluateAutoScaleContent content = new BatchPoolEvaluateAutoScaleContent(""); + Response response = client.EvaluatePoolAutoScale("", content); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_EvaluatePoolAutoScale_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchPoolEvaluateAutoScaleContent content = new BatchPoolEvaluateAutoScaleContent(""); + Response response = await client.EvaluatePoolAutoScaleAsync("", content); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_EvaluatePoolAutoScale_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + autoScaleFormula = "", + }); + Response response = client.EvaluatePoolAutoScale("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + 
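// EvaluatePoolAutoScale returns the evaluation result as JSON; the lines below print the AutoScaleRun timestamp, results, and error details from the parsed document. +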
Console.WriteLine(result.GetProperty("timestamp").ToString()); + Console.WriteLine(result.GetProperty("results").ToString()); + Console.WriteLine(result.GetProperty("error").GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("error").GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("error").GetProperty("values")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("error").GetProperty("values")[0].GetProperty("value").ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_EvaluatePoolAutoScale_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + autoScaleFormula = "", + }); + Response response = await client.EvaluatePoolAutoScaleAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.GetProperty("timestamp").ToString()); + Console.WriteLine(result.GetProperty("results").ToString()); + Console.WriteLine(result.GetProperty("error").GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("error").GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("error").GetProperty("values")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("error").GetProperty("values")[0].GetProperty("value").ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_EvaluatePoolAutoScale_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchPoolEvaluateAutoScaleContent content = new BatchPoolEvaluateAutoScaleContent(""); + Response response = client.EvaluatePoolAutoScale("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_EvaluatePoolAutoScale_AllParameters_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchPoolEvaluateAutoScaleContent content = new BatchPoolEvaluateAutoScaleContent(""); + Response response = await client.EvaluatePoolAutoScaleAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_ResizePool_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new object()); + Response response = client.ResizePool("", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_ResizePool_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = 
RequestContent.Create(new object()); + Response response = await client.ResizePoolAsync("", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_ResizePool_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchPoolResizeContent content = new BatchPoolResizeContent(); + Response response = client.ResizePool("", content); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_ResizePool_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchPoolResizeContent content = new BatchPoolResizeContent(); + Response response = await client.ResizePoolAsync("", content); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_ResizePool_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + targetDedicatedNodes = 1234, + targetLowPriorityNodes = 1234, + resizeTimeout = "PT1H23M45S", + nodeDeallocationOption = "requeue", + }); + Response response = client.ResizePool("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_ResizePool_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + targetDedicatedNodes = 1234, + targetLowPriorityNodes = 1234, + resizeTimeout = "PT1H23M45S", + nodeDeallocationOption = "requeue", + }); + Response response = await client.ResizePoolAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_ResizePool_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchPoolResizeContent content = new BatchPoolResizeContent + { + TargetDedicatedNodes = 1234, + TargetLowPriorityNodes = 1234, + ResizeTimeout = XmlConvert.ToTimeSpan("PT1H23M45S"), + NodeDeallocationOption = BatchNodeDeallocationOption.Requeue, + }; + Response response = client.ResizePool("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_ResizePool_AllParameters_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchPoolResizeContent content = new BatchPoolResizeContent + { + TargetDedicatedNodes = 1234, + TargetLowPriorityNodes = 1234, + 
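// ResizeTimeout bounds how long the service may spend resizing, and NodeDeallocationOption says what happens to tasks on nodes that are removed (here, requeue). +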
ResizeTimeout = XmlConvert.ToTimeSpan("PT1H23M45S"), + NodeDeallocationOption = BatchNodeDeallocationOption.Requeue, + }; + Response response = await client.ResizePoolAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_StopPoolResize_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.StopPoolResize(""); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_StopPoolResize_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.StopPoolResizeAsync(""); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_StopPoolResize_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.StopPoolResize("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_StopPoolResize_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.StopPoolResizeAsync("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_ReplacePoolProperties_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + applicationPackageReferences = new object[] + { +new +{ +applicationId = "", +} + }, + metadata = new object[] + { +new +{ +name = "", +value = "", +} + }, + }); + Response response = client.ReplacePoolProperties("", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_ReplacePoolProperties_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + applicationPackageReferences = new object[] + { +new +{ +applicationId = "", +} + }, + metadata = new object[] + { +new +{ +name = "", +value = "", +} + }, + }); + Response response = await client.ReplacePoolPropertiesAsync("", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_ReplacePoolProperties_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential 
credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchPoolReplaceContent pool = new BatchPoolReplaceContent(new BatchApplicationPackageReference[] + { +new BatchApplicationPackageReference("") + }, new MetadataItem[] + { +new MetadataItem("", "") + }); + Response response = client.ReplacePoolProperties("", pool); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_ReplacePoolProperties_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchPoolReplaceContent pool = new BatchPoolReplaceContent(new BatchApplicationPackageReference[] + { +new BatchApplicationPackageReference("") + }, new MetadataItem[] + { +new MetadataItem("", "") + }); + Response response = await client.ReplacePoolPropertiesAsync("", pool); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_ReplacePoolProperties_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + startTask = new + { + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + registry = new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + }, + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { +new +{ +autoStorageContainerName = "", +storageContainerUrl = "", +httpUrl = "", +blobPrefix = "", +filePath = "", +fileMode = "", +} + }, + environmentSettings = new object[] + { +new +{ +name = "", +value = "", +} + }, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { +new +{ +applicationId = "", +version = "", +} + }, + metadata = new object[] + { +new +{ +name = "", +value = "", +} + }, + targetNodeCommunicationMode = "default", + }); + Response response = client.ReplacePoolProperties("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_ReplacePoolProperties_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + startTask = new + { + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + registry = new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + }, + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { +new +{ +autoStorageContainerName = "", +storageContainerUrl = "", +httpUrl = "", +blobPrefix = "", +filePath = "", +fileMode = "", +} + }, + environmentSettings = new object[] + { +new +{ +name = "", +value = "", +} + }, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + 
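// waitForSuccess below asks compute nodes to finish the start task before scheduling any tasks. +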
maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { +new +{ +applicationId = "", +version = "", +} + }, + metadata = new object[] + { +new +{ +name = "", +value = "", +} + }, + targetNodeCommunicationMode = "default", + }); + Response response = await client.ReplacePoolPropertiesAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_ReplacePoolProperties_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchPoolReplaceContent pool = new BatchPoolReplaceContent(new BatchApplicationPackageReference[] + { +new BatchApplicationPackageReference("") +{ +Version = "", +} + }, new MetadataItem[] + { +new MetadataItem("", "") + }) + { + StartTask = new BatchStartTask("") + { + ContainerSettings = new BatchTaskContainerSettings("") + { + ContainerRunOptions = "", + Registry = new ContainerRegistryReference + { + Username = "", + Password = "", + RegistryServer = "", + IdentityReference = new BatchNodeIdentityReference + { + ResourceId = "", + }, + }, + WorkingDirectory = ContainerWorkingDirectory.TaskWorkingDirectory, + }, + ResourceFiles = {new ResourceFile +{ +AutoStorageContainerName = "", +StorageContainerUrl = "", +HttpUrl = "", +BlobPrefix = "", +FilePath = "", +FileMode = "", +IdentityReference = default, +}}, + EnvironmentSettings = {new EnvironmentSetting("") +{ +Value = "", +}}, + UserIdentity = new UserIdentity + { + Username = "", + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, + MaxTaskRetryCount = 1234, + WaitForSuccess = true, + }, + TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, + }; + Response response = client.ReplacePoolProperties("", pool, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_ReplacePoolProperties_AllParameters_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchPoolReplaceContent pool = new BatchPoolReplaceContent(new BatchApplicationPackageReference[] + { +new BatchApplicationPackageReference("") +{ +Version = "", +} + }, new MetadataItem[] + { +new MetadataItem("", "") + }) + { + StartTask = new BatchStartTask("") + { + ContainerSettings = new BatchTaskContainerSettings("") + { + ContainerRunOptions = "", + Registry = new ContainerRegistryReference + { + Username = "", + Password = "", + RegistryServer = "", + IdentityReference = new BatchNodeIdentityReference + { + ResourceId = "", + }, + }, + WorkingDirectory = ContainerWorkingDirectory.TaskWorkingDirectory, + }, + ResourceFiles = {new ResourceFile +{ +AutoStorageContainerName = "", +StorageContainerUrl = "", +HttpUrl = "", +BlobPrefix = "", +FilePath = "", +FileMode = "", +IdentityReference = default, +}}, + EnvironmentSettings = {new EnvironmentSetting("") +{ +Value = "", +}}, + UserIdentity = new UserIdentity + { + Username = "", + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, + 
MaxTaskRetryCount = 1234, + WaitForSuccess = true, + }, + TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, + }; + Response response = await client.ReplacePoolPropertiesAsync("", pool, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_RemoveNodes_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + nodeList = new object[] + { +"" + }, + }); + Response response = client.RemoveNodes("", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_RemoveNodes_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + nodeList = new object[] + { +"" + }, + }); + Response response = await client.RemoveNodesAsync("", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_RemoveNodes_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchNodeRemoveContent content = new BatchNodeRemoveContent(new string[] { "" }); + Response response = client.RemoveNodes("", content); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_RemoveNodes_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchNodeRemoveContent content = new BatchNodeRemoveContent(new string[] { "" }); + Response response = await client.RemoveNodesAsync("", content); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_RemoveNodes_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + nodeList = new object[] + { +"" + }, + resizeTimeout = "PT1H23M45S", + nodeDeallocationOption = "requeue", + }); + Response response = client.RemoveNodes("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_RemoveNodes_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + nodeList = new object[] + { +"" + }, + resizeTimeout = "PT1H23M45S", + nodeDeallocationOption = "requeue", + }); + Response response = await client.RemoveNodesAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] 
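+ // The convenience overloads below pass a BatchNodeRemoveContent model instead of hand-built JSON.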
+ [Ignore("Only validating compilation of examples")] + public void Example_Batch_RemoveNodes_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchNodeRemoveContent content = new BatchNodeRemoveContent(new string[] { "" }) + { + ResizeTimeout = XmlConvert.ToTimeSpan("PT1H23M45S"), + NodeDeallocationOption = BatchNodeDeallocationOption.Requeue, + }; + Response response = client.RemoveNodes("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_RemoveNodes_AllParameters_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchNodeRemoveContent content = new BatchNodeRemoveContent(new string[] { "" }) + { + ResizeTimeout = XmlConvert.ToTimeSpan("PT1H23M45S"), + NodeDeallocationOption = BatchNodeDeallocationOption.Requeue, + }; + Response response = await client.RemoveNodesAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_DeleteJob_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.DeleteJob(""); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_DeleteJob_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.DeleteJobAsync(""); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_DeleteJob_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.DeleteJob("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_DeleteJob_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.DeleteJobAsync("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetJob_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetJob("", null, null, null, null, null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + 
Console.WriteLine(result.GetProperty("poolInfo").ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetJob_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetJobAsync("", null, null, null, null, null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.GetProperty("poolInfo").ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetJob_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetJob(""); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetJob_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetJobAsync(""); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetJob_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetJob("", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), new string[] { "" }, new string[] { "" }, null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("usesTaskDependencies").ToString()); + Console.WriteLine(result.GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("eTag").ToString()); + Console.WriteLine(result.GetProperty("lastModified").ToString()); + Console.WriteLine(result.GetProperty("creationTime").ToString()); + Console.WriteLine(result.GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("stateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("previousState").ToString()); + Console.WriteLine(result.GetProperty("previousStateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("priority").ToString()); + Console.WriteLine(result.GetProperty("allowTaskPreemption").ToString()); + Console.WriteLine(result.GetProperty("maxParallelTasks").ToString()); + Console.WriteLine(result.GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + 
Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("filePattern").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("path").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("containerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("uploadHeaders")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("uploadHeaders")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("uploadOptions").GetProperty("uploadCondition").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("constraints").GetProperty("retentionTime").ToString()); + 
Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("requiredSlots").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("killJobOnCompletion").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("runExclusive").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("applicationPackageReferences")[0].GetProperty("applicationId").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("applicationPackageReferences")[0].GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("authenticationTokenSettings").GetProperty("access")[0].ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("allowLowPriorityNode").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + 
Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("constraints").GetProperty("retentionTime").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("waitForSuccess").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("rerunOnNodeRebootAfterSuccess").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + 
Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("retentionTime").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("commonEnvironmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("commonEnvironmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("poolId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("autoPoolIdPrefix").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("poolLifetimeOption").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("keepAlive").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("vmSize").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("publisher").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("offer").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("sku").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("virtualMachineImageId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("exactVersion").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("nodeAgentSKUId").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("windowsConfiguration").GetProperty("enableAutomaticUpdates").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("lun").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("caching").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("diskSizeGB").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("storageAccountType").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("licenseType").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("type").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerImageNames")[0].ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("diskEncryptionConfiguration").GetProperty("targets")[0].ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("nodePlacementConfiguration").GetProperty("policy").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("name").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("publisher").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("type").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("typeHandlerVersion").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("autoUpgradeMinorVersion").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("enableAutomaticUpgrade").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("settings").GetProperty("").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("protectedSettings").GetProperty("").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("provisionAfterExtensions")[0].ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("ephemeralOSDiskSettings").GetProperty("placement").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("caching").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("diskSizeGB").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("managedDisk").GetProperty("storageAccountType").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("writeAcceleratorEnabled").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("encryptionAtHost").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("securityType").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("uefiSettings").GetProperty("secureBootEnabled").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("uefiSettings").GetProperty("vTpmEnabled").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("serviceArtifactReference").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("taskSlotsPerNode").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("taskSchedulingPolicy").GetProperty("nodeFillType").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("resizeTimeout").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("resourceTags").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("targetDedicatedNodes").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("targetLowPriorityNodes").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("enableAutoScale").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("autoScaleFormula").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("autoScaleEvaluationInterval").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("enableInterNodeCommunication").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("subnetId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("dynamicVNetAssignmentScope").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("protocol").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("backendPort").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("frontendPortRangeStart").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("frontendPortRangeEnd").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("priority").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("access").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("sourceAddressPrefix").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("sourcePortRanges")[0].ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("publicIPAddressConfiguration").GetProperty("provision").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("publicIPAddressConfiguration").GetProperty("ipAddressIds")[0].ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("enableAcceleratedNetworking").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("maxTaskRetryCount").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("waitForSuccess").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("applicationPackageReferences")[0].GetProperty("applicationId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("applicationPackageReferences")[0].GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("uid").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("gid").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("sshPrivateKey").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("windowsUserConfiguration").GetProperty("loginMode").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("metadata")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("metadata")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("accountName").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("containerName").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("accountKey").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("sasKey").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("blobfuseOptions").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("source").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("mountOptions").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("source").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("mountOptions").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("accountName").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("azureFileUrl").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("accountKey").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("mountOptions").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("targetNodeCommunicationMode").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("mode").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("disableAutomaticRollback").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("enableAutomaticOSUpgrade").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("useRollingUpgradePolicy").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("osRollingUpgradeDeferral").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("enableCrossZoneUpgrade").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxBatchInstancePercent").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxUnhealthyInstancePercent").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxUnhealthyUpgradedInstancePercent").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("pauseTimeBetweenBatches").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("prioritizeUnhealthyInstances").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("rollbackFailedInstancesOnPolicyBreach").ToString()); + Console.WriteLine(result.GetProperty("onAllTasksComplete").ToString()); + Console.WriteLine(result.GetProperty("onTaskFailure").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("subnetId").ToString()); + Console.WriteLine(result.GetProperty("metadata")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("metadata")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("endTime").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("poolId").ToString()); 
+ Console.WriteLine(result.GetProperty("executionInfo").GetProperty("schedulingError").GetProperty("category").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("schedulingError").GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("schedulingError").GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("schedulingError").GetProperty("details")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("schedulingError").GetProperty("details")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("terminateReason").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("lastUpdateTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("userCPUTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("kernelCPUTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("wallClockTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("readIOps").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("writeIOps").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("readIOGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("writeIOGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("numSucceededTasks").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("numFailedTasks").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("numTaskRetries").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("waitTime").ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetJob_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetJob("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), select: new string[] { "" }, expand: new string[] { "" }, requestConditions: null); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_UpdateJob_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new object()); + Response response = client.UpdateJob("", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_UpdateJob_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new object()); + Response response = await client.UpdateJobAsync("", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_UpdateJob_AllParameters() + { + Uri endpoint = new 
Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + priority = 1234, + allowTaskPreemption = true, + maxParallelTasks = 1234, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + poolInfo = new + { + poolId = "", + autoPoolSpecification = new + { + autoPoolIdPrefix = "", + poolLifetimeOption = "jobschedule", + keepAlive = true, + pool = new + { + displayName = "", + vmSize = "", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "", + offer = "", + sku = "", + version = "", + virtualMachineImageId = "", + }, + nodeAgentSKUId = "", + windowsConfiguration = new + { + enableAutomaticUpdates = true, + }, + dataDisks = new object[] + { +new +{ +lun = 1234, +caching = "none", +diskSizeGB = 1234, +storageAccountType = "standard_lrs", +} + }, + licenseType = "", + containerConfiguration = new + { + type = "dockerCompatible", + containerImageNames = new object[] + { +"" + }, + containerRegistries = new object[] + { +new +{ +username = "", +password = "", +registryServer = "", +identityReference = new +{ +resourceId = "", +}, +} + }, + }, + diskEncryptionConfiguration = new + { + targets = new object[] + { +"osdisk" + }, + }, + nodePlacementConfiguration = new + { + policy = "regional", + }, + extensions = new object[] + { +new +{ +name = "", +publisher = "", +type = "", +typeHandlerVersion = "", +autoUpgradeMinorVersion = true, +enableAutomaticUpgrade = true, +settings = new +{ +key = "", +}, +protectedSettings = new +{ +key = "", +}, +provisionAfterExtensions = new object[] +{ +"" +}, +} + }, + osDisk = new + { + ephemeralOSDiskSettings = new + { + placement = "cachedisk", + }, + caching = "none", + diskSizeGB = 1234, + managedDisk = new + { + storageAccountType = "standard_lrs", + }, + writeAcceleratorEnabled = true, + }, + securityProfile = new + { + encryptionAtHost = true, + securityType = "trustedLaunch", + uefiSettings = new + { + secureBootEnabled = true, + vTpmEnabled = true, + }, + }, + serviceArtifactReference = new + { + id = "", + }, + }, + taskSlotsPerNode = 1234, + taskSchedulingPolicy = new + { + nodeFillType = "spread", + }, + resizeTimeout = "PT1H23M45S", + resourceTags = "", + targetDedicatedNodes = 1234, + targetLowPriorityNodes = 1234, + enableAutoScale = true, + autoScaleFormula = "", + autoScaleEvaluationInterval = "PT1H23M45S", + enableInterNodeCommunication = true, + networkConfiguration = new + { + subnetId = "", + dynamicVNetAssignmentScope = "none", + endpointConfiguration = new + { + inboundNATPools = new object[] + { +new +{ +name = "", +protocol = "tcp", +backendPort = 1234, +frontendPortRangeStart = 1234, +frontendPortRangeEnd = 1234, +networkSecurityGroupRules = new object[] +{ +new +{ +priority = 1234, +access = "allow", +sourceAddressPrefix = "", +sourcePortRanges = new object[] +{ +"" +}, +} +}, +} + }, + }, + publicIPAddressConfiguration = new + { + provision = "batchmanaged", + ipAddressIds = new object[] + { +"" + }, + }, + enableAcceleratedNetworking = true, + }, + startTask = new + { + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { +new +{ +autoStorageContainerName = "", +storageContainerUrl = "", +httpUrl = "", +blobPrefix = "", +filePath = "", +fileMode = "", +} + }, + environmentSettings = new object[] + { +new 
+{ +name = "", +value = "", +} + }, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { +new +{ +applicationId = "", +version = "", +} + }, + userAccounts = new object[] + { +new +{ +name = "", +password = "", +elevationLevel = "nonadmin", +linuxUserConfiguration = new +{ +uid = 1234, +gid = 1234, +sshPrivateKey = "", +}, +windowsUserConfiguration = new +{ +loginMode = "batch", +}, +} + }, + metadata = new object[] + { +new +{ +name = "", +value = "", +} + }, + mountConfiguration = new object[] + { +new +{ +azureBlobFileSystemConfiguration = new +{ +accountName = "", +containerName = "", +accountKey = "", +sasKey = "", +blobfuseOptions = "", +relativeMountPath = "", +}, +nfsMountConfiguration = new +{ +source = "", +relativeMountPath = "", +mountOptions = "", +}, +cifsMountConfiguration = new +{ +username = "", +source = "", +relativeMountPath = "", +mountOptions = "", +password = "", +}, +azureFileShareConfiguration = new +{ +accountName = "", +azureFileUrl = "", +accountKey = "", +relativeMountPath = "", +mountOptions = "", +}, +} + }, + targetNodeCommunicationMode = "default", + upgradePolicy = new + { + mode = "automatic", + automaticOSUpgradePolicy = new + { + disableAutomaticRollback = true, + enableAutomaticOSUpgrade = true, + useRollingUpgradePolicy = true, + osRollingUpgradeDeferral = true, + }, + rollingUpgradePolicy = new + { + enableCrossZoneUpgrade = true, + maxBatchInstancePercent = 1234, + maxUnhealthyInstancePercent = 1234, + maxUnhealthyUpgradedInstancePercent = 1234, + pauseTimeBetweenBatches = "PT1H23M45S", + prioritizeUnhealthyInstances = true, + rollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, + }, + onAllTasksComplete = "noaction", + metadata = new object[] + { +null + }, + }); + Response response = client.UpdateJob("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_UpdateJob_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + priority = 1234, + allowTaskPreemption = true, + maxParallelTasks = 1234, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + poolInfo = new + { + poolId = "", + autoPoolSpecification = new + { + autoPoolIdPrefix = "", + poolLifetimeOption = "jobschedule", + keepAlive = true, + pool = new + { + displayName = "", + vmSize = "", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "", + offer = "", + sku = "", + version = "", + virtualMachineImageId = "", + }, + nodeAgentSKUId = "", + windowsConfiguration = new + { + enableAutomaticUpdates = true, + }, + dataDisks = new object[] + { +new +{ +lun = 1234, +caching = "none", +diskSizeGB = 1234, +storageAccountType = "standard_lrs", +} + }, + licenseType = "", + containerConfiguration = new + { + type = "dockerCompatible", + containerImageNames = new object[] + { +"" + }, + containerRegistries = new object[] + { +new +{ +username = "", +password = "", +registryServer = "", +identityReference = new +{ +resourceId = "", +}, +} + }, + }, + 
diskEncryptionConfiguration = new + { + targets = new object[] + { +"osdisk" + }, + }, + nodePlacementConfiguration = new + { + policy = "regional", + }, + extensions = new object[] + { +new +{ +name = "", +publisher = "", +type = "", +typeHandlerVersion = "", +autoUpgradeMinorVersion = true, +enableAutomaticUpgrade = true, +settings = new +{ +key = "", +}, +protectedSettings = new +{ +key = "", +}, +provisionAfterExtensions = new object[] +{ +"" +}, +} + }, + osDisk = new + { + ephemeralOSDiskSettings = new + { + placement = "cachedisk", + }, + caching = "none", + diskSizeGB = 1234, + managedDisk = new + { + storageAccountType = "standard_lrs", + }, + writeAcceleratorEnabled = true, + }, + securityProfile = new + { + encryptionAtHost = true, + securityType = "trustedLaunch", + uefiSettings = new + { + secureBootEnabled = true, + vTpmEnabled = true, + }, + }, + serviceArtifactReference = new + { + id = "", + }, + }, + taskSlotsPerNode = 1234, + taskSchedulingPolicy = new + { + nodeFillType = "spread", + }, + resizeTimeout = "PT1H23M45S", + resourceTags = "", + targetDedicatedNodes = 1234, + targetLowPriorityNodes = 1234, + enableAutoScale = true, + autoScaleFormula = "", + autoScaleEvaluationInterval = "PT1H23M45S", + enableInterNodeCommunication = true, + networkConfiguration = new + { + subnetId = "", + dynamicVNetAssignmentScope = "none", + endpointConfiguration = new + { + inboundNATPools = new object[] + { +new +{ +name = "", +protocol = "tcp", +backendPort = 1234, +frontendPortRangeStart = 1234, +frontendPortRangeEnd = 1234, +networkSecurityGroupRules = new object[] +{ +new +{ +priority = 1234, +access = "allow", +sourceAddressPrefix = "", +sourcePortRanges = new object[] +{ +"" +}, +} +}, +} + }, + }, + publicIPAddressConfiguration = new + { + provision = "batchmanaged", + ipAddressIds = new object[] + { +"" + }, + }, + enableAcceleratedNetworking = true, + }, + startTask = new + { + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { +new +{ +autoStorageContainerName = "", +storageContainerUrl = "", +httpUrl = "", +blobPrefix = "", +filePath = "", +fileMode = "", +} + }, + environmentSettings = new object[] + { +new +{ +name = "", +value = "", +} + }, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { +new +{ +applicationId = "", +version = "", +} + }, + userAccounts = new object[] + { +new +{ +name = "", +password = "", +elevationLevel = "nonadmin", +linuxUserConfiguration = new +{ +uid = 1234, +gid = 1234, +sshPrivateKey = "", +}, +windowsUserConfiguration = new +{ +loginMode = "batch", +}, +} + }, + metadata = new object[] + { +new +{ +name = "", +value = "", +} + }, + mountConfiguration = new object[] + { +new +{ +azureBlobFileSystemConfiguration = new +{ +accountName = "", +containerName = "", +accountKey = "", +sasKey = "", +blobfuseOptions = "", +relativeMountPath = "", +}, +nfsMountConfiguration = new +{ +source = "", +relativeMountPath = "", +mountOptions = "", +}, +cifsMountConfiguration = new +{ +username = "", +source = "", +relativeMountPath = "", +mountOptions = "", +password = "", +}, +azureFileShareConfiguration = new +{ +accountName = "", +azureFileUrl = "", +accountKey = "", +relativeMountPath = "", +mountOptions = "", +}, +} + }, + targetNodeCommunicationMode = 
"default", + upgradePolicy = new + { + mode = "automatic", + automaticOSUpgradePolicy = new + { + disableAutomaticRollback = true, + enableAutomaticOSUpgrade = true, + useRollingUpgradePolicy = true, + osRollingUpgradeDeferral = true, + }, + rollingUpgradePolicy = new + { + enableCrossZoneUpgrade = true, + maxBatchInstancePercent = 1234, + maxUnhealthyInstancePercent = 1234, + maxUnhealthyUpgradedInstancePercent = 1234, + pauseTimeBetweenBatches = "PT1H23M45S", + prioritizeUnhealthyInstances = true, + rollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, + }, + onAllTasksComplete = "noaction", + metadata = new object[] + { +null + }, + }); + Response response = await client.UpdateJobAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_ReplaceJob_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + poolInfo = new object(), + }); + Response response = client.ReplaceJob("", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_ReplaceJob_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + poolInfo = new object(), + }); + Response response = await client.ReplaceJobAsync("", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_ReplaceJob_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchJob job = new BatchJob(new BatchPoolInfo()); + Response response = client.ReplaceJob("", job); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_ReplaceJob_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchJob job = new BatchJob(new BatchPoolInfo()); + Response response = await client.ReplaceJobAsync("", job); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_ReplaceJob_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + priority = 1234, + allowTaskPreemption = true, + maxParallelTasks = 1234, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + poolInfo = new + { + poolId = "", + autoPoolSpecification = new + { + autoPoolIdPrefix = "", + poolLifetimeOption = "jobschedule", + keepAlive = true, + pool = new + { + displayName = "", + vmSize = "", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "", + offer = "", + sku = "", + version = "", + virtualMachineImageId = "", + }, + 
nodeAgentSKUId = "", + windowsConfiguration = new + { + enableAutomaticUpdates = true, + }, + dataDisks = new object[] + { +new +{ +lun = 1234, +caching = "none", +diskSizeGB = 1234, +storageAccountType = "standard_lrs", +} + }, + licenseType = "", + containerConfiguration = new + { + type = "dockerCompatible", + containerImageNames = new object[] + { +"" + }, + containerRegistries = new object[] + { +new +{ +username = "", +password = "", +registryServer = "", +identityReference = new +{ +resourceId = "", +}, +} + }, + }, + diskEncryptionConfiguration = new + { + targets = new object[] + { +"osdisk" + }, + }, + nodePlacementConfiguration = new + { + policy = "regional", + }, + extensions = new object[] + { +new +{ +name = "", +publisher = "", +type = "", +typeHandlerVersion = "", +autoUpgradeMinorVersion = true, +enableAutomaticUpgrade = true, +settings = new +{ +key = "", +}, +protectedSettings = new +{ +key = "", +}, +provisionAfterExtensions = new object[] +{ +"" +}, +} + }, + osDisk = new + { + ephemeralOSDiskSettings = new + { + placement = "cachedisk", + }, + caching = "none", + diskSizeGB = 1234, + managedDisk = new + { + storageAccountType = "standard_lrs", + }, + writeAcceleratorEnabled = true, + }, + securityProfile = new + { + encryptionAtHost = true, + securityType = "trustedLaunch", + uefiSettings = new + { + secureBootEnabled = true, + vTpmEnabled = true, + }, + }, + serviceArtifactReference = new + { + id = "", + }, + }, + taskSlotsPerNode = 1234, + taskSchedulingPolicy = new + { + nodeFillType = "spread", + }, + resizeTimeout = "PT1H23M45S", + resourceTags = "", + targetDedicatedNodes = 1234, + targetLowPriorityNodes = 1234, + enableAutoScale = true, + autoScaleFormula = "", + autoScaleEvaluationInterval = "PT1H23M45S", + enableInterNodeCommunication = true, + networkConfiguration = new + { + subnetId = "", + dynamicVNetAssignmentScope = "none", + endpointConfiguration = new + { + inboundNATPools = new object[] + { +new +{ +name = "", +protocol = "tcp", +backendPort = 1234, +frontendPortRangeStart = 1234, +frontendPortRangeEnd = 1234, +networkSecurityGroupRules = new object[] +{ +new +{ +priority = 1234, +access = "allow", +sourceAddressPrefix = "", +sourcePortRanges = new object[] +{ +"" +}, +} +}, +} + }, + }, + publicIPAddressConfiguration = new + { + provision = "batchmanaged", + ipAddressIds = new object[] + { +"" + }, + }, + enableAcceleratedNetworking = true, + }, + startTask = new + { + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { +new +{ +autoStorageContainerName = "", +storageContainerUrl = "", +httpUrl = "", +blobPrefix = "", +filePath = "", +fileMode = "", +} + }, + environmentSettings = new object[] + { +new +{ +name = "", +value = "", +} + }, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { +new +{ +applicationId = "", +version = "", +} + }, + userAccounts = new object[] + { +new +{ +name = "", +password = "", +elevationLevel = "nonadmin", +linuxUserConfiguration = new +{ +uid = 1234, +gid = 1234, +sshPrivateKey = "", +}, +windowsUserConfiguration = new +{ +loginMode = "batch", +}, +} + }, + metadata = new object[] + { +new +{ +name = "", +value = "", +} + }, + mountConfiguration = new object[] + { +new +{ +azureBlobFileSystemConfiguration = new +{ 
+accountName = "", +containerName = "", +accountKey = "", +sasKey = "", +blobfuseOptions = "", +relativeMountPath = "", +}, +nfsMountConfiguration = new +{ +source = "", +relativeMountPath = "", +mountOptions = "", +}, +cifsMountConfiguration = new +{ +username = "", +source = "", +relativeMountPath = "", +mountOptions = "", +password = "", +}, +azureFileShareConfiguration = new +{ +accountName = "", +azureFileUrl = "", +accountKey = "", +relativeMountPath = "", +mountOptions = "", +}, +} + }, + targetNodeCommunicationMode = "default", + upgradePolicy = new + { + mode = "automatic", + automaticOSUpgradePolicy = new + { + disableAutomaticRollback = true, + enableAutomaticOSUpgrade = true, + useRollingUpgradePolicy = true, + osRollingUpgradeDeferral = true, + }, + rollingUpgradePolicy = new + { + enableCrossZoneUpgrade = true, + maxBatchInstancePercent = 1234, + maxUnhealthyInstancePercent = 1234, + maxUnhealthyUpgradedInstancePercent = 1234, + pauseTimeBetweenBatches = "PT1H23M45S", + prioritizeUnhealthyInstances = true, + rollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, + }, + onAllTasksComplete = "noaction", + metadata = new object[] + { +null + }, + }); + Response response = client.ReplaceJob("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_ReplaceJob_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + priority = 1234, + allowTaskPreemption = true, + maxParallelTasks = 1234, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + poolInfo = new + { + poolId = "", + autoPoolSpecification = new + { + autoPoolIdPrefix = "", + poolLifetimeOption = "jobschedule", + keepAlive = true, + pool = new + { + displayName = "", + vmSize = "", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "", + offer = "", + sku = "", + version = "", + virtualMachineImageId = "", + }, + nodeAgentSKUId = "", + windowsConfiguration = new + { + enableAutomaticUpdates = true, + }, + dataDisks = new object[] + { +new +{ +lun = 1234, +caching = "none", +diskSizeGB = 1234, +storageAccountType = "standard_lrs", +} + }, + licenseType = "", + containerConfiguration = new + { + type = "dockerCompatible", + containerImageNames = new object[] + { +"" + }, + containerRegistries = new object[] + { +new +{ +username = "", +password = "", +registryServer = "", +identityReference = new +{ +resourceId = "", +}, +} + }, + }, + diskEncryptionConfiguration = new + { + targets = new object[] + { +"osdisk" + }, + }, + nodePlacementConfiguration = new + { + policy = "regional", + }, + extensions = new object[] + { +new +{ +name = "", +publisher = "", +type = "", +typeHandlerVersion = "", +autoUpgradeMinorVersion = true, +enableAutomaticUpgrade = true, +settings = new +{ +key = "", +}, +protectedSettings = new +{ +key = "", +}, +provisionAfterExtensions = new object[] +{ +"" +}, +} + }, + osDisk = new + { + ephemeralOSDiskSettings = new + { + placement = "cachedisk", + }, + caching = "none", + diskSizeGB = 1234, + managedDisk = new + { + storageAccountType = "standard_lrs", + }, + writeAcceleratorEnabled = true, + }, + securityProfile = new + { + 
encryptionAtHost = true, + securityType = "trustedLaunch", + uefiSettings = new + { + secureBootEnabled = true, + vTpmEnabled = true, + }, + }, + serviceArtifactReference = new + { + id = "", + }, + }, + taskSlotsPerNode = 1234, + taskSchedulingPolicy = new + { + nodeFillType = "spread", + }, + resizeTimeout = "PT1H23M45S", + resourceTags = "", + targetDedicatedNodes = 1234, + targetLowPriorityNodes = 1234, + enableAutoScale = true, + autoScaleFormula = "", + autoScaleEvaluationInterval = "PT1H23M45S", + enableInterNodeCommunication = true, + networkConfiguration = new + { + subnetId = "", + dynamicVNetAssignmentScope = "none", + endpointConfiguration = new + { + inboundNATPools = new object[] + { +new +{ +name = "", +protocol = "tcp", +backendPort = 1234, +frontendPortRangeStart = 1234, +frontendPortRangeEnd = 1234, +networkSecurityGroupRules = new object[] +{ +new +{ +priority = 1234, +access = "allow", +sourceAddressPrefix = "", +sourcePortRanges = new object[] +{ +"" +}, +} +}, +} + }, + }, + publicIPAddressConfiguration = new + { + provision = "batchmanaged", + ipAddressIds = new object[] + { +"" + }, + }, + enableAcceleratedNetworking = true, + }, + startTask = new + { + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { +new +{ +autoStorageContainerName = "", +storageContainerUrl = "", +httpUrl = "", +blobPrefix = "", +filePath = "", +fileMode = "", +} + }, + environmentSettings = new object[] + { +new +{ +name = "", +value = "", +} + }, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { +new +{ +applicationId = "", +version = "", +} + }, + userAccounts = new object[] + { +new +{ +name = "", +password = "", +elevationLevel = "nonadmin", +linuxUserConfiguration = new +{ +uid = 1234, +gid = 1234, +sshPrivateKey = "", +}, +windowsUserConfiguration = new +{ +loginMode = "batch", +}, +} + }, + metadata = new object[] + { +new +{ +name = "", +value = "", +} + }, + mountConfiguration = new object[] + { +new +{ +azureBlobFileSystemConfiguration = new +{ +accountName = "", +containerName = "", +accountKey = "", +sasKey = "", +blobfuseOptions = "", +relativeMountPath = "", +}, +nfsMountConfiguration = new +{ +source = "", +relativeMountPath = "", +mountOptions = "", +}, +cifsMountConfiguration = new +{ +username = "", +source = "", +relativeMountPath = "", +mountOptions = "", +password = "", +}, +azureFileShareConfiguration = new +{ +accountName = "", +azureFileUrl = "", +accountKey = "", +relativeMountPath = "", +mountOptions = "", +}, +} + }, + targetNodeCommunicationMode = "default", + upgradePolicy = new + { + mode = "automatic", + automaticOSUpgradePolicy = new + { + disableAutomaticRollback = true, + enableAutomaticOSUpgrade = true, + useRollingUpgradePolicy = true, + osRollingUpgradeDeferral = true, + }, + rollingUpgradePolicy = new + { + enableCrossZoneUpgrade = true, + maxBatchInstancePercent = 1234, + maxUnhealthyInstancePercent = 1234, + maxUnhealthyUpgradedInstancePercent = 1234, + pauseTimeBetweenBatches = "PT1H23M45S", + prioritizeUnhealthyInstances = true, + rollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, + }, + onAllTasksComplete = "noaction", + metadata = new object[] + { +null + }, + }); + Response response = await client.ReplaceJobAsync("", content, 
timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_ReplaceJob_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchJob job = new BatchJob(new BatchPoolInfo + { + PoolId = "", + AutoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.JobSchedule) + { + AutoPoolIdPrefix = "", + KeepAlive = true, + Pool = new BatchPoolSpecification("") + { + DisplayName = "", + VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + { + Publisher = "", + Offer = "", + Sku = "", + Version = "", + VirtualMachineImageId = "", + }, "") + { + WindowsConfiguration = new WindowsConfiguration + { + EnableAutomaticUpdates = true, + }, + DataDisks = {new DataDisk(1234, 1234) +{ +Caching = CachingType.None, +StorageAccountType = StorageAccountType.StandardLRS, +}}, + LicenseType = "", + ContainerConfiguration = new ContainerConfiguration(ContainerType.DockerCompatible) + { + ContainerImageNames = { "" }, + ContainerRegistries = {new ContainerRegistryReference +{ +Username = "", +Password = "", +RegistryServer = "", +IdentityReference = new BatchNodeIdentityReference +{ +ResourceId = "", +}, +}}, + }, + DiskEncryptionConfiguration = new DiskEncryptionConfiguration + { + Targets = { DiskEncryptionTarget.OsDisk }, + }, + NodePlacementConfiguration = new BatchNodePlacementConfiguration + { + Policy = BatchNodePlacementPolicyType.Regional, + }, + Extensions = {new VMExtension("", "", "") +{ +TypeHandlerVersion = "", +AutoUpgradeMinorVersion = true, +EnableAutomaticUpgrade = true, +Settings = +{ +["key"] = "" +}, +ProtectedSettings = +{ +["key"] = "" +}, +ProvisionAfterExtensions = {""}, +}}, + OsDisk = new OSDisk + { + EphemeralOSDiskSettings = new DiffDiskSettings + { + Placement = DiffDiskPlacement.CacheDisk, + }, + Caching = CachingType.None, + DiskSizeGB = 1234, + ManagedDisk = new ManagedDisk(StorageAccountType.StandardLRS), + WriteAcceleratorEnabled = true, + }, + SecurityProfile = new SecurityProfile(true, SecurityTypes.TrustedLaunch, new UefiSettings + { + SecureBootEnabled = true, + VTpmEnabled = true, + }), + ServiceArtifactReference = new ServiceArtifactReference(""), + }, + TaskSlotsPerNode = 1234, + TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), + ResizeTimeout = XmlConvert.ToTimeSpan("PT1H23M45S"), + ResourceTags = "", + TargetDedicatedNodes = 1234, + TargetLowPriorityNodes = 1234, + EnableAutoScale = true, + AutoScaleFormula = "", + AutoScaleEvaluationInterval = XmlConvert.ToTimeSpan("PT1H23M45S"), + EnableInterNodeCommunication = true, + NetworkConfiguration = new NetworkConfiguration + { + SubnetId = "", + DynamicVNetAssignmentScope = DynamicVNetAssignmentScope.None, + EndpointConfiguration = new BatchPoolEndpointConfiguration(new InboundNatPool[] + { +new InboundNatPool("", InboundEndpointProtocol.Tcp, 1234, 1234, 1234) +{ +NetworkSecurityGroupRules = {new NetworkSecurityGroupRule(1234, NetworkSecurityGroupRuleAccess.Allow, "") +{ +SourcePortRanges = {""}, +}}, +} + }), + PublicIpAddressConfiguration = new PublicIpAddressConfiguration + { + IpAddressProvisioningType = IpAddressProvisioningType.BatchManaged, + IpAddressIds = { "" }, + }, + EnableAcceleratedNetworking = true, + }, + StartTask 
= new BatchStartTask("") + { + ContainerSettings = new BatchTaskContainerSettings("") + { + ContainerRunOptions = "", + Registry = default, + WorkingDirectory = ContainerWorkingDirectory.TaskWorkingDirectory, + }, + ResourceFiles = {new ResourceFile +{ +AutoStorageContainerName = "", +StorageContainerUrl = "", +HttpUrl = "", +BlobPrefix = "", +FilePath = "", +FileMode = "", +IdentityReference = default, +}}, + EnvironmentSettings = {new EnvironmentSetting("") +{ +Value = "", +}}, + UserIdentity = new UserIdentity + { + Username = "", + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, + MaxTaskRetryCount = 1234, + WaitForSuccess = true, + }, + ApplicationPackageReferences = {new BatchApplicationPackageReference("") +{ +Version = "", +}}, + UserAccounts = {new UserAccount("", "") +{ +ElevationLevel = ElevationLevel.NonAdmin, +LinuxUserConfiguration = new LinuxUserConfiguration +{ +Uid = 1234, +Gid = 1234, +SshPrivateKey = "", +}, +WindowsUserConfiguration = new WindowsUserConfiguration +{ +LoginMode = LoginMode.Batch, +}, +}}, + Metadata = { new MetadataItem("", "") }, + MountConfiguration = {new MountConfiguration +{ +AzureBlobFileSystemConfiguration = new AzureBlobFileSystemConfiguration("", "", "") +{ +AccountKey = "", +SasKey = "", +BlobfuseOptions = "", +IdentityReference = default, +}, +NfsMountConfiguration = new NfsMountConfiguration("", "") +{ +MountOptions = "", +}, +CifsMountConfiguration = new CifsMountConfiguration("", "", "", "") +{ +MountOptions = "", +}, +AzureFileShareConfiguration = new AzureFileShareConfiguration("", "", "", "") +{ +MountOptions = "", +}, +}}, + TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, + UpgradePolicy = new UpgradePolicy(UpgradeMode.Automatic) + { + AutomaticOsUpgradePolicy = new AutomaticOsUpgradePolicy + { + DisableAutomaticRollback = true, + EnableAutomaticOsUpgrade = true, + UseRollingUpgradePolicy = true, + OsRollingUpgradeDeferral = true, + }, + RollingUpgradePolicy = new RollingUpgradePolicy + { + EnableCrossZoneUpgrade = true, + MaxBatchInstancePercent = 1234, + MaxUnhealthyInstancePercent = 1234, + MaxUnhealthyUpgradedInstancePercent = 1234, + PauseTimeBetweenBatches = XmlConvert.ToTimeSpan("PT1H23M45S"), + PrioritizeUnhealthyInstances = true, + RollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, + }) + { + Priority = 1234, + AllowTaskPreemption = true, + MaxParallelTasks = 1234, + Constraints = new BatchJobConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + OnAllTasksComplete = OnAllBatchTasksComplete.NoAction, + Metadata = { default }, + }; + Response response = client.ReplaceJob("", job, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_ReplaceJob_AllParameters_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchJob job = new BatchJob(new BatchPoolInfo + { + PoolId = "", + AutoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.JobSchedule) + { + AutoPoolIdPrefix = "", + KeepAlive = true, + Pool = new BatchPoolSpecification("") + { + DisplayName = "", + VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + { + Publisher = "", + 
Offer = "", + Sku = "", + Version = "", + VirtualMachineImageId = "", + }, "") + { + WindowsConfiguration = new WindowsConfiguration + { + EnableAutomaticUpdates = true, + }, + DataDisks = {new DataDisk(1234, 1234) +{ +Caching = CachingType.None, +StorageAccountType = StorageAccountType.StandardLRS, +}}, + LicenseType = "", + ContainerConfiguration = new ContainerConfiguration(ContainerType.DockerCompatible) + { + ContainerImageNames = { "" }, + ContainerRegistries = {new ContainerRegistryReference +{ +Username = "", +Password = "", +RegistryServer = "", +IdentityReference = new BatchNodeIdentityReference +{ +ResourceId = "", +}, +}}, + }, + DiskEncryptionConfiguration = new DiskEncryptionConfiguration + { + Targets = { DiskEncryptionTarget.OsDisk }, + }, + NodePlacementConfiguration = new BatchNodePlacementConfiguration + { + Policy = BatchNodePlacementPolicyType.Regional, + }, + Extensions = {new VMExtension("", "", "") +{ +TypeHandlerVersion = "", +AutoUpgradeMinorVersion = true, +EnableAutomaticUpgrade = true, +Settings = +{ +["key"] = "" +}, +ProtectedSettings = +{ +["key"] = "" +}, +ProvisionAfterExtensions = {""}, +}}, + OsDisk = new OSDisk + { + EphemeralOSDiskSettings = new DiffDiskSettings + { + Placement = DiffDiskPlacement.CacheDisk, + }, + Caching = CachingType.None, + DiskSizeGB = 1234, + ManagedDisk = new ManagedDisk(StorageAccountType.StandardLRS), + WriteAcceleratorEnabled = true, + }, + SecurityProfile = new SecurityProfile(true, SecurityTypes.TrustedLaunch, new UefiSettings + { + SecureBootEnabled = true, + VTpmEnabled = true, + }), + ServiceArtifactReference = new ServiceArtifactReference(""), + }, + TaskSlotsPerNode = 1234, + TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), + ResizeTimeout = XmlConvert.ToTimeSpan("PT1H23M45S"), + ResourceTags = "", + TargetDedicatedNodes = 1234, + TargetLowPriorityNodes = 1234, + EnableAutoScale = true, + AutoScaleFormula = "", + AutoScaleEvaluationInterval = XmlConvert.ToTimeSpan("PT1H23M45S"), + EnableInterNodeCommunication = true, + NetworkConfiguration = new NetworkConfiguration + { + SubnetId = "", + DynamicVNetAssignmentScope = DynamicVNetAssignmentScope.None, + EndpointConfiguration = new BatchPoolEndpointConfiguration(new InboundNatPool[] + { +new InboundNatPool("", InboundEndpointProtocol.Tcp, 1234, 1234, 1234) +{ +NetworkSecurityGroupRules = {new NetworkSecurityGroupRule(1234, NetworkSecurityGroupRuleAccess.Allow, "") +{ +SourcePortRanges = {""}, +}}, +} + }), + PublicIpAddressConfiguration = new PublicIpAddressConfiguration + { + IpAddressProvisioningType = IpAddressProvisioningType.BatchManaged, + IpAddressIds = { "" }, + }, + EnableAcceleratedNetworking = true, + }, + StartTask = new BatchStartTask("") + { + ContainerSettings = new BatchTaskContainerSettings("") + { + ContainerRunOptions = "", + Registry = default, + WorkingDirectory = ContainerWorkingDirectory.TaskWorkingDirectory, + }, + ResourceFiles = {new ResourceFile +{ +AutoStorageContainerName = "", +StorageContainerUrl = "", +HttpUrl = "", +BlobPrefix = "", +FilePath = "", +FileMode = "", +IdentityReference = default, +}}, + EnvironmentSettings = {new EnvironmentSetting("") +{ +Value = "", +}}, + UserIdentity = new UserIdentity + { + Username = "", + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, + MaxTaskRetryCount = 1234, + WaitForSuccess = true, + }, + ApplicationPackageReferences = {new BatchApplicationPackageReference("") +{ +Version = "", +}}, + 
UserAccounts = {new UserAccount("", "") +{ +ElevationLevel = ElevationLevel.NonAdmin, +LinuxUserConfiguration = new LinuxUserConfiguration +{ +Uid = 1234, +Gid = 1234, +SshPrivateKey = "", +}, +WindowsUserConfiguration = new WindowsUserConfiguration +{ +LoginMode = LoginMode.Batch, +}, +}}, + Metadata = { new MetadataItem("", "") }, + MountConfiguration = {new MountConfiguration +{ +AzureBlobFileSystemConfiguration = new AzureBlobFileSystemConfiguration("", "", "") +{ +AccountKey = "", +SasKey = "", +BlobfuseOptions = "", +IdentityReference = default, +}, +NfsMountConfiguration = new NfsMountConfiguration("", "") +{ +MountOptions = "", +}, +CifsMountConfiguration = new CifsMountConfiguration("", "", "", "") +{ +MountOptions = "", +}, +AzureFileShareConfiguration = new AzureFileShareConfiguration("", "", "", "") +{ +MountOptions = "", +}, +}}, + TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, + UpgradePolicy = new UpgradePolicy(UpgradeMode.Automatic) + { + AutomaticOsUpgradePolicy = new AutomaticOsUpgradePolicy + { + DisableAutomaticRollback = true, + EnableAutomaticOsUpgrade = true, + UseRollingUpgradePolicy = true, + OsRollingUpgradeDeferral = true, + }, + RollingUpgradePolicy = new RollingUpgradePolicy + { + EnableCrossZoneUpgrade = true, + MaxBatchInstancePercent = 1234, + MaxUnhealthyInstancePercent = 1234, + MaxUnhealthyUpgradedInstancePercent = 1234, + PauseTimeBetweenBatches = XmlConvert.ToTimeSpan("PT1H23M45S"), + PrioritizeUnhealthyInstances = true, + RollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, + }) + { + Priority = 1234, + AllowTaskPreemption = true, + MaxParallelTasks = 1234, + Constraints = new BatchJobConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + OnAllTasksComplete = OnAllBatchTasksComplete.NoAction, + Metadata = { default }, + }; + Response response = await client.ReplaceJobAsync("", job, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_DisableJob_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + disableTasks = "requeue", + }); + Response response = client.DisableJob("", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_DisableJob_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + disableTasks = "requeue", + }); + Response response = await client.DisableJobAsync("", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_DisableJob_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchJobDisableContent content = new BatchJobDisableContent(DisableBatchJobOption.Requeue); + Response response = client.DisableJob("", content); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task 
Example_Batch_DisableJob_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchJobDisableContent content = new BatchJobDisableContent(DisableBatchJobOption.Requeue); + Response response = await client.DisableJobAsync("", content); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_DisableJob_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + disableTasks = "requeue", + }); + Response response = client.DisableJob("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_DisableJob_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + disableTasks = "requeue", + }); + Response response = await client.DisableJobAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_DisableJob_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchJobDisableContent content = new BatchJobDisableContent(DisableBatchJobOption.Requeue); + Response response = client.DisableJob("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_DisableJob_AllParameters_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchJobDisableContent content = new BatchJobDisableContent(DisableBatchJobOption.Requeue); + Response response = await client.DisableJobAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_EnableJob_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.EnableJob(""); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_EnableJob_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.EnableJobAsync(""); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void 
Example_Batch_EnableJob_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.EnableJob("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_EnableJob_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.EnableJobAsync("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_TerminateJob_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = null; + Response response = client.TerminateJob("", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_TerminateJob_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = null; + Response response = await client.TerminateJobAsync("", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_TerminateJob_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.TerminateJob(""); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_TerminateJob_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.TerminateJobAsync(""); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_TerminateJob_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + terminateReason = "", + }); + Response response = client.TerminateJob("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_TerminateJob_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + terminateReason = "", + }); + Response response = await client.TerminateJobAsync("", content, timeOutInSeconds: 1234, ocpdate: 
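+// ocpdate is sent as the ocp-date header: the client-asserted time the request was issued, in RFC 1123 format.
+// With shared key auth the service may reject requests whose ocp-date drifts far from the service clock.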
DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_TerminateJob_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchJobTerminateContent parameters = new BatchJobTerminateContent + { + TerminationReason = "", + }; + Response response = client.TerminateJob("", parameters: parameters, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_TerminateJob_AllParameters_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchJobTerminateContent parameters = new BatchJobTerminateContent + { + TerminationReason = "", + }; + Response response = await client.TerminateJobAsync("", parameters: parameters, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_CreateJob_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + id = "", + poolInfo = new object(), + }); + Response response = client.CreateJob(content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_CreateJob_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + id = "", + poolInfo = new object(), + }); + Response response = await client.CreateJobAsync(content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_CreateJob_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchJobCreateContent job = new BatchJobCreateContent("", new BatchPoolInfo()); + Response response = client.CreateJob(job); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_CreateJob_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchJobCreateContent job = new BatchJobCreateContent("", new BatchPoolInfo()); + Response response = await client.CreateJobAsync(job); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_CreateJob_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + id = "", + displayName = "", + usesTaskDependencies = 
true, + priority = 1234, + allowTaskPreemption = true, + maxParallelTasks = 1234, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + jobManagerTask = new + { + id = "", + displayName = "", + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + registry = new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + }, + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { +new +{ +autoStorageContainerName = "", +storageContainerUrl = "", +httpUrl = "", +blobPrefix = "", +filePath = "", +fileMode = "", +} + }, + outputFiles = new object[] + { +new +{ +filePattern = "", +destination = new +{ +container = new +{ +path = "", +containerUrl = "", +uploadHeaders = new object[] +{ +new +{ +name = "", +value = "", +} +}, +}, +}, +uploadOptions = new +{ +uploadCondition = "tasksuccess", +}, +} + }, + environmentSettings = new object[] + { +new +{ +name = "", +value = "", +} + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + requiredSlots = 1234, + killJobOnCompletion = true, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + runExclusive = true, + applicationPackageReferences = new object[] + { +new +{ +applicationId = "", +version = "", +} + }, + authenticationTokenSettings = new + { + access = new object[] + { +"job" + }, + }, + allowLowPriorityNode = true, + }, + jobPreparationTask = new + { + id = "", + commandLine = "", + resourceFiles = new object[] + { +null + }, + environmentSettings = new object[] + { +null + }, + waitForSuccess = true, + rerunOnNodeRebootAfterSuccess = true, + }, + jobReleaseTask = new + { + id = "", + commandLine = "", + resourceFiles = new object[] + { +null + }, + environmentSettings = new object[] + { +null + }, + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + }, + commonEnvironmentSettings = new object[] + { +null + }, + poolInfo = new + { + poolId = "", + autoPoolSpecification = new + { + autoPoolIdPrefix = "", + poolLifetimeOption = "jobschedule", + keepAlive = true, + pool = new + { + displayName = "", + vmSize = "", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "", + offer = "", + sku = "", + version = "", + virtualMachineImageId = "", + }, + nodeAgentSKUId = "", + windowsConfiguration = new + { + enableAutomaticUpdates = true, + }, + dataDisks = new object[] + { +new +{ +lun = 1234, +caching = "none", +diskSizeGB = 1234, +storageAccountType = "standard_lrs", +} + }, + licenseType = "", + containerConfiguration = new + { + type = "dockerCompatible", + containerImageNames = new object[] + { +"" + }, + containerRegistries = new object[] + { +null + }, + }, + diskEncryptionConfiguration = new + { + targets = new object[] + { +"osdisk" + }, + }, + nodePlacementConfiguration = new + { + policy = "regional", + }, + extensions = new object[] + { +new +{ +name = "", +publisher = "", +type = "", +typeHandlerVersion = "", +autoUpgradeMinorVersion = true, +enableAutomaticUpgrade = true, +settings = new +{ +key = "", +}, +protectedSettings = new +{ +key = "", +}, +provisionAfterExtensions = new object[] +{ +"" +}, +} + }, + osDisk = new + { + ephemeralOSDiskSettings = new + { + placement = "cachedisk", + }, + caching = "none", + diskSizeGB = 1234, + managedDisk = new + { + storageAccountType = 
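+// Data-disk storageAccountType values include "standard_lrs" and "premium_lrs"; recent API versions also accept "standardssd_lrs".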
"standard_lrs", + }, + writeAcceleratorEnabled = true, + }, + securityProfile = new + { + encryptionAtHost = true, + securityType = "trustedLaunch", + uefiSettings = new + { + secureBootEnabled = true, + vTpmEnabled = true, + }, + }, + serviceArtifactReference = new + { + id = "", + }, + }, + taskSlotsPerNode = 1234, + taskSchedulingPolicy = new + { + nodeFillType = "spread", + }, + resizeTimeout = "PT1H23M45S", + resourceTags = "", + targetDedicatedNodes = 1234, + targetLowPriorityNodes = 1234, + enableAutoScale = true, + autoScaleFormula = "", + autoScaleEvaluationInterval = "PT1H23M45S", + enableInterNodeCommunication = true, + networkConfiguration = new + { + subnetId = "", + dynamicVNetAssignmentScope = "none", + endpointConfiguration = new + { + inboundNATPools = new object[] + { +new +{ +name = "", +protocol = "tcp", +backendPort = 1234, +frontendPortRangeStart = 1234, +frontendPortRangeEnd = 1234, +networkSecurityGroupRules = new object[] +{ +new +{ +priority = 1234, +access = "allow", +sourceAddressPrefix = "", +sourcePortRanges = new object[] +{ +"" +}, +} +}, +} + }, + }, + publicIPAddressConfiguration = new + { + provision = "batchmanaged", + ipAddressIds = new object[] + { +"" + }, + }, + enableAcceleratedNetworking = true, + }, + startTask = new + { + commandLine = "", + resourceFiles = new object[] + { +null + }, + environmentSettings = new object[] + { +null + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { +null + }, + userAccounts = new object[] + { +new +{ +name = "", +password = "", +elevationLevel = "nonadmin", +linuxUserConfiguration = new +{ +uid = 1234, +gid = 1234, +sshPrivateKey = "", +}, +windowsUserConfiguration = new +{ +loginMode = "batch", +}, +} + }, + metadata = new object[] + { +new +{ +name = "", +value = "", +} + }, + mountConfiguration = new object[] + { +new +{ +azureBlobFileSystemConfiguration = new +{ +accountName = "", +containerName = "", +accountKey = "", +sasKey = "", +blobfuseOptions = "", +relativeMountPath = "", +}, +nfsMountConfiguration = new +{ +source = "", +relativeMountPath = "", +mountOptions = "", +}, +cifsMountConfiguration = new +{ +username = "", +source = "", +relativeMountPath = "", +mountOptions = "", +password = "", +}, +azureFileShareConfiguration = new +{ +accountName = "", +azureFileUrl = "", +accountKey = "", +relativeMountPath = "", +mountOptions = "", +}, +} + }, + targetNodeCommunicationMode = "default", + upgradePolicy = new + { + mode = "automatic", + automaticOSUpgradePolicy = new + { + disableAutomaticRollback = true, + enableAutomaticOSUpgrade = true, + useRollingUpgradePolicy = true, + osRollingUpgradeDeferral = true, + }, + rollingUpgradePolicy = new + { + enableCrossZoneUpgrade = true, + maxBatchInstancePercent = 1234, + maxUnhealthyInstancePercent = 1234, + maxUnhealthyUpgradedInstancePercent = 1234, + pauseTimeBetweenBatches = "PT1H23M45S", + prioritizeUnhealthyInstances = true, + rollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, + }, + onAllTasksComplete = "noaction", + onTaskFailure = "noaction", + networkConfiguration = new + { + subnetId = "", + }, + metadata = new object[] + { +null + }, + }); + Response response = client.CreateJob(content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_CreateJob_AllParameters_Async() + { + Uri endpoint = new 
Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + id = "", + displayName = "", + usesTaskDependencies = true, + priority = 1234, + allowTaskPreemption = true, + maxParallelTasks = 1234, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + jobManagerTask = new + { + id = "", + displayName = "", + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + registry = new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + }, + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { +new +{ +autoStorageContainerName = "", +storageContainerUrl = "", +httpUrl = "", +blobPrefix = "", +filePath = "", +fileMode = "", +} + }, + outputFiles = new object[] + { +new +{ +filePattern = "", +destination = new +{ +container = new +{ +path = "", +containerUrl = "", +uploadHeaders = new object[] +{ +new +{ +name = "", +value = "", +} +}, +}, +}, +uploadOptions = new +{ +uploadCondition = "tasksuccess", +}, +} + }, + environmentSettings = new object[] + { +new +{ +name = "", +value = "", +} + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + requiredSlots = 1234, + killJobOnCompletion = true, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + runExclusive = true, + applicationPackageReferences = new object[] + { +new +{ +applicationId = "", +version = "", +} + }, + authenticationTokenSettings = new + { + access = new object[] + { +"job" + }, + }, + allowLowPriorityNode = true, + }, + jobPreparationTask = new + { + id = "", + commandLine = "", + resourceFiles = new object[] + { +null + }, + environmentSettings = new object[] + { +null + }, + waitForSuccess = true, + rerunOnNodeRebootAfterSuccess = true, + }, + jobReleaseTask = new + { + id = "", + commandLine = "", + resourceFiles = new object[] + { +null + }, + environmentSettings = new object[] + { +null + }, + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + }, + commonEnvironmentSettings = new object[] + { +null + }, + poolInfo = new + { + poolId = "", + autoPoolSpecification = new + { + autoPoolIdPrefix = "", + poolLifetimeOption = "jobschedule", + keepAlive = true, + pool = new + { + displayName = "", + vmSize = "", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "", + offer = "", + sku = "", + version = "", + virtualMachineImageId = "", + }, + nodeAgentSKUId = "", + windowsConfiguration = new + { + enableAutomaticUpdates = true, + }, + dataDisks = new object[] + { +new +{ +lun = 1234, +caching = "none", +diskSizeGB = 1234, +storageAccountType = "standard_lrs", +} + }, + licenseType = "", + containerConfiguration = new + { + type = "dockerCompatible", + containerImageNames = new object[] + { +"" + }, + containerRegistries = new object[] + { +null + }, + }, + diskEncryptionConfiguration = new + { + targets = new object[] + { +"osdisk" + }, + }, + nodePlacementConfiguration = new + { + policy = "regional", + }, + extensions = new object[] + { +new +{ +name = "", +publisher = "", +type = "", +typeHandlerVersion = "", +autoUpgradeMinorVersion = true, +enableAutomaticUpgrade = true, +settings = new +{ +key = "", +}, +protectedSettings = new +{ 
+key = "", +}, +provisionAfterExtensions = new object[] +{ +"" +}, +} + }, + osDisk = new + { + ephemeralOSDiskSettings = new + { + placement = "cachedisk", + }, + caching = "none", + diskSizeGB = 1234, + managedDisk = new + { + storageAccountType = "standard_lrs", + }, + writeAcceleratorEnabled = true, + }, + securityProfile = new + { + encryptionAtHost = true, + securityType = "trustedLaunch", + uefiSettings = new + { + secureBootEnabled = true, + vTpmEnabled = true, + }, + }, + serviceArtifactReference = new + { + id = "", + }, + }, + taskSlotsPerNode = 1234, + taskSchedulingPolicy = new + { + nodeFillType = "spread", + }, + resizeTimeout = "PT1H23M45S", + resourceTags = "", + targetDedicatedNodes = 1234, + targetLowPriorityNodes = 1234, + enableAutoScale = true, + autoScaleFormula = "", + autoScaleEvaluationInterval = "PT1H23M45S", + enableInterNodeCommunication = true, + networkConfiguration = new + { + subnetId = "", + dynamicVNetAssignmentScope = "none", + endpointConfiguration = new + { + inboundNATPools = new object[] + { +new +{ +name = "", +protocol = "tcp", +backendPort = 1234, +frontendPortRangeStart = 1234, +frontendPortRangeEnd = 1234, +networkSecurityGroupRules = new object[] +{ +new +{ +priority = 1234, +access = "allow", +sourceAddressPrefix = "", +sourcePortRanges = new object[] +{ +"" +}, +} +}, +} + }, + }, + publicIPAddressConfiguration = new + { + provision = "batchmanaged", + ipAddressIds = new object[] + { +"" + }, + }, + enableAcceleratedNetworking = true, + }, + startTask = new + { + commandLine = "", + resourceFiles = new object[] + { +null + }, + environmentSettings = new object[] + { +null + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { +null + }, + userAccounts = new object[] + { +new +{ +name = "", +password = "", +elevationLevel = "nonadmin", +linuxUserConfiguration = new +{ +uid = 1234, +gid = 1234, +sshPrivateKey = "", +}, +windowsUserConfiguration = new +{ +loginMode = "batch", +}, +} + }, + metadata = new object[] + { +new +{ +name = "", +value = "", +} + }, + mountConfiguration = new object[] + { +new +{ +azureBlobFileSystemConfiguration = new +{ +accountName = "", +containerName = "", +accountKey = "", +sasKey = "", +blobfuseOptions = "", +relativeMountPath = "", +}, +nfsMountConfiguration = new +{ +source = "", +relativeMountPath = "", +mountOptions = "", +}, +cifsMountConfiguration = new +{ +username = "", +source = "", +relativeMountPath = "", +mountOptions = "", +password = "", +}, +azureFileShareConfiguration = new +{ +accountName = "", +azureFileUrl = "", +accountKey = "", +relativeMountPath = "", +mountOptions = "", +}, +} + }, + targetNodeCommunicationMode = "default", + upgradePolicy = new + { + mode = "automatic", + automaticOSUpgradePolicy = new + { + disableAutomaticRollback = true, + enableAutomaticOSUpgrade = true, + useRollingUpgradePolicy = true, + osRollingUpgradeDeferral = true, + }, + rollingUpgradePolicy = new + { + enableCrossZoneUpgrade = true, + maxBatchInstancePercent = 1234, + maxUnhealthyInstancePercent = 1234, + maxUnhealthyUpgradedInstancePercent = 1234, + pauseTimeBetweenBatches = "PT1H23M45S", + prioritizeUnhealthyInstances = true, + rollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, + }, + onAllTasksComplete = "noaction", + onTaskFailure = "noaction", + networkConfiguration = new + { + subnetId = "", + }, + metadata = new object[] + { +null + }, + }); + Response response = await client.CreateJobAsync(content, timeOutInSeconds: 1234, ocpdate: 
DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_CreateJob_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchJobCreateContent job = new BatchJobCreateContent("", new BatchPoolInfo + { + PoolId = "", + AutoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.JobSchedule) + { + AutoPoolIdPrefix = "", + KeepAlive = true, + Pool = new BatchPoolSpecification("") + { + DisplayName = "", + VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + { + Publisher = "", + Offer = "", + Sku = "", + Version = "", + VirtualMachineImageId = "", + }, "") + { + WindowsConfiguration = new WindowsConfiguration + { + EnableAutomaticUpdates = true, + }, + DataDisks = {new DataDisk(1234, 1234) +{ +Caching = CachingType.None, +StorageAccountType = StorageAccountType.StandardLRS, +}}, + LicenseType = "", + ContainerConfiguration = new ContainerConfiguration(ContainerType.DockerCompatible) + { + ContainerImageNames = { "" }, + ContainerRegistries = { default }, + }, + DiskEncryptionConfiguration = new DiskEncryptionConfiguration + { + Targets = { DiskEncryptionTarget.OsDisk }, + }, + NodePlacementConfiguration = new BatchNodePlacementConfiguration + { + Policy = BatchNodePlacementPolicyType.Regional, + }, + Extensions = {new VMExtension("", "", "") +{ +TypeHandlerVersion = "", +AutoUpgradeMinorVersion = true, +EnableAutomaticUpgrade = true, +Settings = +{ +["key"] = "" +}, +ProtectedSettings = +{ +["key"] = "" +}, +ProvisionAfterExtensions = {""}, +}}, + OsDisk = new OSDisk + { + EphemeralOSDiskSettings = new DiffDiskSettings + { + Placement = DiffDiskPlacement.CacheDisk, + }, + Caching = CachingType.None, + DiskSizeGB = 1234, + ManagedDisk = new ManagedDisk(StorageAccountType.StandardLRS), + WriteAcceleratorEnabled = true, + }, + SecurityProfile = new SecurityProfile(true, SecurityTypes.TrustedLaunch, new UefiSettings + { + SecureBootEnabled = true, + VTpmEnabled = true, + }), + ServiceArtifactReference = new ServiceArtifactReference(""), + }, + TaskSlotsPerNode = 1234, + TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), + ResizeTimeout = XmlConvert.ToTimeSpan("PT1H23M45S"), + ResourceTags = "", + TargetDedicatedNodes = 1234, + TargetLowPriorityNodes = 1234, + EnableAutoScale = true, + AutoScaleFormula = "", + AutoScaleEvaluationInterval = XmlConvert.ToTimeSpan("PT1H23M45S"), + EnableInterNodeCommunication = true, + NetworkConfiguration = new NetworkConfiguration + { + SubnetId = "", + DynamicVNetAssignmentScope = DynamicVNetAssignmentScope.None, + EndpointConfiguration = new BatchPoolEndpointConfiguration(new InboundNatPool[] + { +new InboundNatPool("", InboundEndpointProtocol.Tcp, 1234, 1234, 1234) +{ +NetworkSecurityGroupRules = {new NetworkSecurityGroupRule(1234, NetworkSecurityGroupRuleAccess.Allow, "") +{ +SourcePortRanges = {""}, +}}, +} + }), + PublicIpAddressConfiguration = new PublicIpAddressConfiguration + { + IpAddressProvisioningType = IpAddressProvisioningType.BatchManaged, + IpAddressIds = { "" }, + }, + EnableAcceleratedNetworking = true, + }, + StartTask = new BatchStartTask("") + { + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + UserIdentity = default, + MaxTaskRetryCount = 1234, + 
WaitForSuccess = true, + }, + ApplicationPackageReferences = { default }, + UserAccounts = {new UserAccount("", "") +{ +ElevationLevel = ElevationLevel.NonAdmin, +LinuxUserConfiguration = new LinuxUserConfiguration +{ +Uid = 1234, +Gid = 1234, +SshPrivateKey = "", +}, +WindowsUserConfiguration = new WindowsUserConfiguration +{ +LoginMode = LoginMode.Batch, +}, +}}, + Metadata = { new MetadataItem("", "") }, + MountConfiguration = {new MountConfiguration +{ +AzureBlobFileSystemConfiguration = new AzureBlobFileSystemConfiguration("", "", "") +{ +AccountKey = "", +SasKey = "", +BlobfuseOptions = "", +IdentityReference = default, +}, +NfsMountConfiguration = new NfsMountConfiguration("", "") +{ +MountOptions = "", +}, +CifsMountConfiguration = new CifsMountConfiguration("", "", "", "") +{ +MountOptions = "", +}, +AzureFileShareConfiguration = new AzureFileShareConfiguration("", "", "", "") +{ +MountOptions = "", +}, +}}, + TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, + UpgradePolicy = new UpgradePolicy(UpgradeMode.Automatic) + { + AutomaticOsUpgradePolicy = new AutomaticOsUpgradePolicy + { + DisableAutomaticRollback = true, + EnableAutomaticOsUpgrade = true, + UseRollingUpgradePolicy = true, + OsRollingUpgradeDeferral = true, + }, + RollingUpgradePolicy = new RollingUpgradePolicy + { + EnableCrossZoneUpgrade = true, + MaxBatchInstancePercent = 1234, + MaxUnhealthyInstancePercent = 1234, + MaxUnhealthyUpgradedInstancePercent = 1234, + PauseTimeBetweenBatches = XmlConvert.ToTimeSpan("PT1H23M45S"), + PrioritizeUnhealthyInstances = true, + RollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, + }) + { + DisplayName = "", + UsesTaskDependencies = true, + Priority = 1234, + AllowTaskPreemption = true, + MaxParallelTasks = 1234, + Constraints = new BatchJobConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + JobManagerTask = new BatchJobManagerTask("", "") + { + DisplayName = "", + ContainerSettings = new BatchTaskContainerSettings("") + { + ContainerRunOptions = "", + Registry = new ContainerRegistryReference + { + Username = "", + Password = "", + RegistryServer = "", + IdentityReference = new BatchNodeIdentityReference + { + ResourceId = "", + }, + }, + WorkingDirectory = ContainerWorkingDirectory.TaskWorkingDirectory, + }, + ResourceFiles = {new ResourceFile +{ +AutoStorageContainerName = "", +StorageContainerUrl = "", +HttpUrl = "", +BlobPrefix = "", +FilePath = "", +FileMode = "", +IdentityReference = default, +}}, + OutputFiles = {new OutputFile("", new OutputFileDestination +{ +Container = new OutputFileBlobContainerDestination("") +{ +Path = "", +IdentityReference = default, +UploadHeaders = {new HttpHeader("") +{ +Value = "", +}}, +}, +}, new OutputFileUploadConfig(OutputFileUploadCondition.TaskSuccess))}, + EnvironmentSettings = {new EnvironmentSetting("") +{ +Value = "", +}}, + Constraints = new BatchTaskConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + RequiredSlots = 1234, + KillJobOnCompletion = true, + UserIdentity = new UserIdentity + { + Username = "", + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, + RunExclusive = true, + ApplicationPackageReferences = {new BatchApplicationPackageReference("") +{ +Version = "", +}}, + AuthenticationTokenSettings = new AuthenticationTokenSettings + { + Access = { 
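+// AccessScope.Job grants the task's generated authentication token access to operations on the job that contains it.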
AccessScope.Job }, + }, + AllowLowPriorityNode = true, + }, + JobPreparationTask = new BatchJobPreparationTask("") + { + Id = "", + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + Constraints = default, + WaitForSuccess = true, + UserIdentity = default, + RerunOnNodeRebootAfterSuccess = true, + }, + JobReleaseTask = new BatchJobReleaseTask("") + { + Id = "", + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + UserIdentity = default, + }, + CommonEnvironmentSettings = { default }, + OnAllTasksComplete = OnAllBatchTasksComplete.NoAction, + OnTaskFailure = OnBatchTaskFailure.NoAction, + NetworkConfiguration = new BatchJobNetworkConfiguration(""), + Metadata = { default }, + }; + Response response = client.CreateJob(job, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_CreateJob_AllParameters_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchJobCreateContent job = new BatchJobCreateContent("", new BatchPoolInfo + { + PoolId = "", + AutoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.JobSchedule) + { + AutoPoolIdPrefix = "", + KeepAlive = true, + Pool = new BatchPoolSpecification("") + { + DisplayName = "", + VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + { + Publisher = "", + Offer = "", + Sku = "", + Version = "", + VirtualMachineImageId = "", + }, "") + { + WindowsConfiguration = new WindowsConfiguration + { + EnableAutomaticUpdates = true, + }, + DataDisks = {new DataDisk(1234, 1234) +{ +Caching = CachingType.None, +StorageAccountType = StorageAccountType.StandardLRS, +}}, + LicenseType = "", + ContainerConfiguration = new ContainerConfiguration(ContainerType.DockerCompatible) + { + ContainerImageNames = { "" }, + ContainerRegistries = { default }, + }, + DiskEncryptionConfiguration = new DiskEncryptionConfiguration + { + Targets = { DiskEncryptionTarget.OsDisk }, + }, + NodePlacementConfiguration = new BatchNodePlacementConfiguration + { + Policy = BatchNodePlacementPolicyType.Regional, + }, + Extensions = {new VMExtension("", "", "") +{ +TypeHandlerVersion = "", +AutoUpgradeMinorVersion = true, +EnableAutomaticUpgrade = true, +Settings = +{ +["key"] = "" +}, +ProtectedSettings = +{ +["key"] = "" +}, +ProvisionAfterExtensions = {""}, +}}, + OsDisk = new OSDisk + { + EphemeralOSDiskSettings = new DiffDiskSettings + { + Placement = DiffDiskPlacement.CacheDisk, + }, + Caching = CachingType.None, + DiskSizeGB = 1234, + ManagedDisk = new ManagedDisk(StorageAccountType.StandardLRS), + WriteAcceleratorEnabled = true, + }, + SecurityProfile = new SecurityProfile(true, SecurityTypes.TrustedLaunch, new UefiSettings + { + SecureBootEnabled = true, + VTpmEnabled = true, + }), + ServiceArtifactReference = new ServiceArtifactReference(""), + }, + TaskSlotsPerNode = 1234, + TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), + ResizeTimeout = XmlConvert.ToTimeSpan("PT1H23M45S"), + ResourceTags = "", + TargetDedicatedNodes = 1234, + TargetLowPriorityNodes = 1234, + EnableAutoScale = true, + AutoScaleFormula = "", + 
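+// With EnableAutoScale, the pool is resized by evaluating AutoScaleFormula at each AutoScaleEvaluationInterval;
+// on the live service this is mutually exclusive with the fixed TargetDedicatedNodes/TargetLowPriorityNodes, which this compile-only sample sets anyway.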
AutoScaleEvaluationInterval = XmlConvert.ToTimeSpan("PT1H23M45S"), + EnableInterNodeCommunication = true, + NetworkConfiguration = new NetworkConfiguration + { + SubnetId = "", + DynamicVNetAssignmentScope = DynamicVNetAssignmentScope.None, + EndpointConfiguration = new BatchPoolEndpointConfiguration(new InboundNatPool[] + { +new InboundNatPool("", InboundEndpointProtocol.Tcp, 1234, 1234, 1234) +{ +NetworkSecurityGroupRules = {new NetworkSecurityGroupRule(1234, NetworkSecurityGroupRuleAccess.Allow, "") +{ +SourcePortRanges = {""}, +}}, +} + }), + PublicIpAddressConfiguration = new PublicIpAddressConfiguration + { + IpAddressProvisioningType = IpAddressProvisioningType.BatchManaged, + IpAddressIds = { "" }, + }, + EnableAcceleratedNetworking = true, + }, + StartTask = new BatchStartTask("") + { + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + UserIdentity = default, + MaxTaskRetryCount = 1234, + WaitForSuccess = true, + }, + ApplicationPackageReferences = { default }, + UserAccounts = {new UserAccount("", "") +{ +ElevationLevel = ElevationLevel.NonAdmin, +LinuxUserConfiguration = new LinuxUserConfiguration +{ +Uid = 1234, +Gid = 1234, +SshPrivateKey = "", +}, +WindowsUserConfiguration = new WindowsUserConfiguration +{ +LoginMode = LoginMode.Batch, +}, +}}, + Metadata = { new MetadataItem("", "") }, + MountConfiguration = {new MountConfiguration +{ +AzureBlobFileSystemConfiguration = new AzureBlobFileSystemConfiguration("", "", "") +{ +AccountKey = "", +SasKey = "", +BlobfuseOptions = "", +IdentityReference = default, +}, +NfsMountConfiguration = new NfsMountConfiguration("", "") +{ +MountOptions = "", +}, +CifsMountConfiguration = new CifsMountConfiguration("", "", "", "") +{ +MountOptions = "", +}, +AzureFileShareConfiguration = new AzureFileShareConfiguration("", "", "", "") +{ +MountOptions = "", +}, +}}, + TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, + UpgradePolicy = new UpgradePolicy(UpgradeMode.Automatic) + { + AutomaticOsUpgradePolicy = new AutomaticOsUpgradePolicy + { + DisableAutomaticRollback = true, + EnableAutomaticOsUpgrade = true, + UseRollingUpgradePolicy = true, + OsRollingUpgradeDeferral = true, + }, + RollingUpgradePolicy = new RollingUpgradePolicy + { + EnableCrossZoneUpgrade = true, + MaxBatchInstancePercent = 1234, + MaxUnhealthyInstancePercent = 1234, + MaxUnhealthyUpgradedInstancePercent = 1234, + PauseTimeBetweenBatches = XmlConvert.ToTimeSpan("PT1H23M45S"), + PrioritizeUnhealthyInstances = true, + RollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, + }) + { + DisplayName = "", + UsesTaskDependencies = true, + Priority = 1234, + AllowTaskPreemption = true, + MaxParallelTasks = 1234, + Constraints = new BatchJobConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + JobManagerTask = new BatchJobManagerTask("", "") + { + DisplayName = "", + ContainerSettings = new BatchTaskContainerSettings("") + { + ContainerRunOptions = "", + Registry = new ContainerRegistryReference + { + Username = "", + Password = "", + RegistryServer = "", + IdentityReference = new BatchNodeIdentityReference + { + ResourceId = "", + }, + }, + WorkingDirectory = ContainerWorkingDirectory.TaskWorkingDirectory, + }, + ResourceFiles = {new ResourceFile +{ +AutoStorageContainerName = "", +StorageContainerUrl = "", +HttpUrl = "", +BlobPrefix = "", +FilePath = "", +FileMode = "", +IdentityReference = default, +}}, + OutputFiles = {new OutputFile("", new 
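+// Each OutputFile pairs a file pattern with a blob-container destination and an upload condition; TaskSuccess uploads only when the task exits with code 0.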
OutputFileDestination +{ +Container = new OutputFileBlobContainerDestination("") +{ +Path = "", +IdentityReference = default, +UploadHeaders = {new HttpHeader("") +{ +Value = "", +}}, +}, +}, new OutputFileUploadConfig(OutputFileUploadCondition.TaskSuccess))}, + EnvironmentSettings = {new EnvironmentSetting("") +{ +Value = "", +}}, + Constraints = new BatchTaskConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + RequiredSlots = 1234, + KillJobOnCompletion = true, + UserIdentity = new UserIdentity + { + Username = "", + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, + RunExclusive = true, + ApplicationPackageReferences = {new BatchApplicationPackageReference("") +{ +Version = "", +}}, + AuthenticationTokenSettings = new AuthenticationTokenSettings + { + Access = { AccessScope.Job }, + }, + AllowLowPriorityNode = true, + }, + JobPreparationTask = new BatchJobPreparationTask("") + { + Id = "", + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + Constraints = default, + WaitForSuccess = true, + UserIdentity = default, + RerunOnNodeRebootAfterSuccess = true, + }, + JobReleaseTask = new BatchJobReleaseTask("") + { + Id = "", + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + UserIdentity = default, + }, + CommonEnvironmentSettings = { default }, + OnAllTasksComplete = OnAllBatchTasksComplete.NoAction, + OnTaskFailure = OnBatchTaskFailure.NoAction, + NetworkConfiguration = new BatchJobNetworkConfiguration(""), + Metadata = { default }, + }; + Response response = await client.CreateJobAsync(job, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetJobTaskCounts_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetJobTaskCounts("", null, null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.GetProperty("taskCounts").GetProperty("active").ToString()); + Console.WriteLine(result.GetProperty("taskCounts").GetProperty("running").ToString()); + Console.WriteLine(result.GetProperty("taskCounts").GetProperty("completed").ToString()); + Console.WriteLine(result.GetProperty("taskCounts").GetProperty("succeeded").ToString()); + Console.WriteLine(result.GetProperty("taskCounts").GetProperty("failed").ToString()); + Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("active").ToString()); + Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("running").ToString()); + Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("completed").ToString()); + Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("succeeded").ToString()); + Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("failed").ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetJobTaskCounts_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential 
credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetJobTaskCountsAsync("", null, null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.GetProperty("taskCounts").GetProperty("active").ToString()); + Console.WriteLine(result.GetProperty("taskCounts").GetProperty("running").ToString()); + Console.WriteLine(result.GetProperty("taskCounts").GetProperty("completed").ToString()); + Console.WriteLine(result.GetProperty("taskCounts").GetProperty("succeeded").ToString()); + Console.WriteLine(result.GetProperty("taskCounts").GetProperty("failed").ToString()); + Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("active").ToString()); + Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("running").ToString()); + Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("completed").ToString()); + Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("succeeded").ToString()); + Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("failed").ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetJobTaskCounts_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetJobTaskCounts(""); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetJobTaskCounts_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetJobTaskCountsAsync(""); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetJobTaskCounts_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetJobTaskCounts("", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.GetProperty("taskCounts").GetProperty("active").ToString()); + Console.WriteLine(result.GetProperty("taskCounts").GetProperty("running").ToString()); + Console.WriteLine(result.GetProperty("taskCounts").GetProperty("completed").ToString()); + Console.WriteLine(result.GetProperty("taskCounts").GetProperty("succeeded").ToString()); + Console.WriteLine(result.GetProperty("taskCounts").GetProperty("failed").ToString()); + Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("active").ToString()); + Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("running").ToString()); + Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("completed").ToString()); + Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("succeeded").ToString()); + Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("failed").ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetJobTaskCounts_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + 
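+// GetJobTaskCounts may lag the true state briefly; taskCounts tallies tasks by state, while taskSlotCounts weighs each task by its required slots.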
BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetJobTaskCountsAsync("", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.GetProperty("taskCounts").GetProperty("active").ToString()); + Console.WriteLine(result.GetProperty("taskCounts").GetProperty("running").ToString()); + Console.WriteLine(result.GetProperty("taskCounts").GetProperty("completed").ToString()); + Console.WriteLine(result.GetProperty("taskCounts").GetProperty("succeeded").ToString()); + Console.WriteLine(result.GetProperty("taskCounts").GetProperty("failed").ToString()); + Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("active").ToString()); + Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("running").ToString()); + Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("completed").ToString()); + Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("succeeded").ToString()); + Console.WriteLine(result.GetProperty("taskSlotCounts").GetProperty("failed").ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetJobTaskCounts_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetJobTaskCounts("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetJobTaskCounts_AllParameters_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetJobTaskCountsAsync("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_DeleteJobSchedule_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.DeleteJobSchedule(""); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_DeleteJobSchedule_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.DeleteJobScheduleAsync(""); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_DeleteJobSchedule_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.DeleteJobSchedule("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_DeleteJobSchedule_AllParameters_Async() + { + Uri endpoint = new Uri(""); + 
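+// requestConditions is null throughout these samples; pass ETag preconditions (If-Match / If-None-Match) to make mutations conditional.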
TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.DeleteJobScheduleAsync("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetJobSchedule_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetJobSchedule("", null, null, null, null, null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetJobSchedule_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetJobScheduleAsync("", null, null, null, null, null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetJobSchedule_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetJobSchedule(""); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetJobSchedule_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetJobScheduleAsync(""); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetJobSchedule_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetJobSchedule("", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), new string[] { "" }, new string[] { "" }, null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("eTag").ToString()); + Console.WriteLine(result.GetProperty("lastModified").ToString()); + Console.WriteLine(result.GetProperty("creationTime").ToString()); + Console.WriteLine(result.GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("stateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("previousState").ToString()); + Console.WriteLine(result.GetProperty("previousStateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("schedule").GetProperty("doNotRunUntil").ToString()); + 
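+// schedule bounds job creation: doNotRunUntil/doNotRunAfter are timestamps, while startWindow and recurrenceInterval are ISO 8601 durations such as PT1H23M45S.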
Console.WriteLine(result.GetProperty("schedule").GetProperty("doNotRunAfter").ToString()); + Console.WriteLine(result.GetProperty("schedule").GetProperty("startWindow").ToString()); + Console.WriteLine(result.GetProperty("schedule").GetProperty("recurrenceInterval").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("priority").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("allowTaskPreemption").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("maxParallelTasks").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("usesTaskDependencies").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("onAllTasksComplete").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("onTaskFailure").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("networkConfiguration").GetProperty("subnetId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("filePattern").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("path").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("containerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("uploadHeaders")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("uploadHeaders")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("uploadOptions").GetProperty("uploadCondition").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("constraints").GetProperty("retentionTime").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("requiredSlots").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("killJobOnCompletion").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("runExclusive").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("applicationPackageReferences")[0].GetProperty("applicationId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("applicationPackageReferences")[0].GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("authenticationTokenSettings").GetProperty("access")[0].ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("allowLowPriorityNode").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("constraints").GetProperty("retentionTime").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("waitForSuccess").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("rerunOnNodeRebootAfterSuccess").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("retentionTime").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("commonEnvironmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("commonEnvironmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("poolId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("autoPoolIdPrefix").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("poolLifetimeOption").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("keepAlive").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("vmSize").ToString()); + 
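// The reads below walk the auto-pool's virtualMachineConfiguration (image reference, disks, extensions, security profile). +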
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("publisher").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("offer").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("sku").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("virtualMachineImageId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("exactVersion").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("nodeAgentSKUId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("windowsConfiguration").GetProperty("enableAutomaticUpdates").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("lun").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("caching").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("diskSizeGB").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("storageAccountType").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("licenseType").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("type").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerImageNames")[0].ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("diskEncryptionConfiguration").GetProperty("targets")[0].ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("nodePlacementConfiguration").GetProperty("policy").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("publisher").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("type").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("typeHandlerVersion").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("autoUpgradeMinorVersion").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("enableAutomaticUpgrade").ToString()); + 
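// "settings" and "protectedSettings" are free-form JSON objects; "key" matches the request payloads in the update examples below. +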
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("settings").GetProperty("").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("protectedSettings").GetProperty("").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("provisionAfterExtensions")[0].ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("ephemeralOSDiskSettings").GetProperty("placement").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("caching").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("diskSizeGB").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("managedDisk").GetProperty("storageAccountType").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("writeAcceleratorEnabled").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("encryptionAtHost").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("securityType").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("uefiSettings").GetProperty("secureBootEnabled").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("uefiSettings").GetProperty("vTpmEnabled").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("serviceArtifactReference").GetProperty("id").ToString()); + 
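// Pool-level sizing, scheduling, autoscale, and network settings. +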
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("taskSlotsPerNode").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("taskSchedulingPolicy").GetProperty("nodeFillType").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("resizeTimeout").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("resourceTags").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("targetDedicatedNodes").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("targetLowPriorityNodes").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("enableAutoScale").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("autoScaleFormula").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("autoScaleEvaluationInterval").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("enableInterNodeCommunication").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("subnetId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("dynamicVNetAssignmentScope").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("protocol").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("backendPort").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("frontendPortRangeStart").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("frontendPortRangeEnd").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("priority").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("access").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("sourceAddressPrefix").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("sourcePortRanges")[0].ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("publicIPAddressConfiguration").GetProperty("provision").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("publicIPAddressConfiguration").GetProperty("ipAddressIds")[0].ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("enableAcceleratedNetworking").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("userIdentity").GetProperty("username").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("waitForSuccess").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("applicationPackageReferences")[0].GetProperty("applicationId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("applicationPackageReferences")[0].GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("uid").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("gid").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("sshPrivateKey").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("windowsUserConfiguration").GetProperty("loginMode").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("metadata")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("metadata")[0].GetProperty("value").ToString()); + 
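// Mount configurations: Azure Blob (blobfuse), NFS, CIFS, and Azure Files. +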
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("accountName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("containerName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("accountKey").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("sasKey").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("blobfuseOptions").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("source").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("mountOptions").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("source").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("relativeMountPath").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("mountOptions").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("accountName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("azureFileUrl").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("accountKey").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("mountOptions").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("targetNodeCommunicationMode").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("mode").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("disableAutomaticRollback").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("enableAutomaticOSUpgrade").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("useRollingUpgradePolicy").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("osRollingUpgradeDeferral").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("enableCrossZoneUpgrade").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxBatchInstancePercent").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxUnhealthyInstancePercent").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxUnhealthyUpgradedInstancePercent").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("pauseTimeBetweenBatches").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("prioritizeUnhealthyInstances").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("rollbackFailedInstancesOnPolicyBreach").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("metadata")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("metadata")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("nextRunTime").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("recentJob").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("recentJob").GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("endTime").ToString()); + Console.WriteLine(result.GetProperty("metadata")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("metadata")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("lastUpdateTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("userCPUTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("kernelCPUTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("wallClockTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("readIOps").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("writeIOps").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("readIOGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("writeIOGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("numSucceededTasks").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("numFailedTasks").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("numTaskRetries").ToString()); + 
Console.WriteLine(result.GetProperty("stats").GetProperty("waitTime").ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetJobSchedule_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetJobSchedule("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), select: new string[] { "" }, expand: new string[] { "" }, requestConditions: null); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_UpdateJobSchedule_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new object()); + Response response = client.UpdateJobSchedule("", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_UpdateJobSchedule_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new object()); + Response response = await client.UpdateJobScheduleAsync("", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_UpdateJobSchedule_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + schedule = new + { + doNotRunUntil = "2022-05-10T18:57:31.2311892Z", + doNotRunAfter = "2022-05-10T18:57:31.2311892Z", + startWindow = "PT1H23M45S", + recurrenceInterval = "PT1H23M45S", + }, + jobSpecification = new + { + priority = 1234, + allowTaskPreemption = true, + maxParallelTasks = 1234, + displayName = "", + usesTaskDependencies = true, + onAllTasksComplete = "noaction", + onTaskFailure = "noaction", + networkConfiguration = new + { + subnetId = "", + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + jobManagerTask = new + { + id = "", + displayName = "", + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + registry = new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + }, + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { +new +{ +autoStorageContainerName = "", +storageContainerUrl = "", +httpUrl = "", +blobPrefix = "", +filePath = "", +fileMode = "", +} + }, + outputFiles = new object[] + { +new +{ +filePattern = "", +destination = new +{ +container = new +{ +path = "", +containerUrl = "", +uploadHeaders = new object[] +{ +new +{ +name = "", +value = "", +} +}, +}, +}, +uploadOptions = new +{ +uploadCondition = "tasksuccess", +}, +} + }, + environmentSettings = new object[] + { +new +{ +name = "", +value = "", +} + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + requiredSlots = 1234, + killJobOnCompletion = true, + userIdentity = new + { + 
username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + runExclusive = true, + applicationPackageReferences = new object[] + { +new +{ +applicationId = "", +version = "", +} + }, + authenticationTokenSettings = new + { + access = new object[] + { +"job" + }, + }, + allowLowPriorityNode = true, + }, + jobPreparationTask = new + { + id = "", + commandLine = "", + resourceFiles = new object[] + { +null + }, + environmentSettings = new object[] + { +null + }, + waitForSuccess = true, + rerunOnNodeRebootAfterSuccess = true, + }, + jobReleaseTask = new + { + id = "", + commandLine = "", + resourceFiles = new object[] + { +null + }, + environmentSettings = new object[] + { +null + }, + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + }, + commonEnvironmentSettings = new object[] + { +null + }, + poolInfo = new + { + poolId = "", + autoPoolSpecification = new + { + autoPoolIdPrefix = "", + poolLifetimeOption = "jobschedule", + keepAlive = true, + pool = new + { + displayName = "", + vmSize = "", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "", + offer = "", + sku = "", + version = "", + virtualMachineImageId = "", + }, + nodeAgentSKUId = "", + windowsConfiguration = new + { + enableAutomaticUpdates = true, + }, + dataDisks = new object[] + { +new +{ +lun = 1234, +caching = "none", +diskSizeGB = 1234, +storageAccountType = "standard_lrs", +} + }, + licenseType = "", + containerConfiguration = new + { + type = "dockerCompatible", + containerImageNames = new object[] + { +"" + }, + containerRegistries = new object[] + { +null + }, + }, + diskEncryptionConfiguration = new + { + targets = new object[] + { +"osdisk" + }, + }, + nodePlacementConfiguration = new + { + policy = "regional", + }, + extensions = new object[] + { +new +{ +name = "", +publisher = "", +type = "", +typeHandlerVersion = "", +autoUpgradeMinorVersion = true, +enableAutomaticUpgrade = true, +settings = new +{ +key = "", +}, +protectedSettings = new +{ +key = "", +}, +provisionAfterExtensions = new object[] +{ +"" +}, +} + }, + osDisk = new + { + ephemeralOSDiskSettings = new + { + placement = "cachedisk", + }, + caching = "none", + diskSizeGB = 1234, + managedDisk = new + { + storageAccountType = "standard_lrs", + }, + writeAcceleratorEnabled = true, + }, + securityProfile = new + { + encryptionAtHost = true, + securityType = "trustedLaunch", + uefiSettings = new + { + secureBootEnabled = true, + vTpmEnabled = true, + }, + }, + serviceArtifactReference = new + { + id = "", + }, + }, + taskSlotsPerNode = 1234, + taskSchedulingPolicy = new + { + nodeFillType = "spread", + }, + resizeTimeout = "PT1H23M45S", + resourceTags = "", + targetDedicatedNodes = 1234, + targetLowPriorityNodes = 1234, + enableAutoScale = true, + autoScaleFormula = "", + autoScaleEvaluationInterval = "PT1H23M45S", + enableInterNodeCommunication = true, + networkConfiguration = new + { + subnetId = "", + dynamicVNetAssignmentScope = "none", + endpointConfiguration = new + { + inboundNATPools = new object[] + { +new +{ +name = "", +protocol = "tcp", +backendPort = 1234, +frontendPortRangeStart = 1234, +frontendPortRangeEnd = 1234, +networkSecurityGroupRules = new object[] +{ +new +{ +priority = 1234, +access = "allow", +sourceAddressPrefix = "", +sourcePortRanges = new object[] +{ +"" +}, +} +}, +} + }, + }, + publicIPAddressConfiguration = new + { + provision = "batchmanaged", + ipAddressIds = new object[] + { +"" + }, + }, + enableAcceleratedNetworking = true, + }, + 
startTask = new + { + commandLine = "", + resourceFiles = new object[] + { +null + }, + environmentSettings = new object[] + { +null + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { +null + }, + userAccounts = new object[] + { +new +{ +name = "", +password = "", +elevationLevel = "nonadmin", +linuxUserConfiguration = new +{ +uid = 1234, +gid = 1234, +sshPrivateKey = "", +}, +windowsUserConfiguration = new +{ +loginMode = "batch", +}, +} + }, + metadata = new object[] + { +new +{ +name = "", +value = "", +} + }, + mountConfiguration = new object[] + { +new +{ +azureBlobFileSystemConfiguration = new +{ +accountName = "", +containerName = "", +accountKey = "", +sasKey = "", +blobfuseOptions = "", +relativeMountPath = "", +}, +nfsMountConfiguration = new +{ +source = "", +relativeMountPath = "", +mountOptions = "", +}, +cifsMountConfiguration = new +{ +username = "", +source = "", +relativeMountPath = "", +mountOptions = "", +password = "", +}, +azureFileShareConfiguration = new +{ +accountName = "", +azureFileUrl = "", +accountKey = "", +relativeMountPath = "", +mountOptions = "", +}, +} + }, + targetNodeCommunicationMode = "default", + upgradePolicy = new + { + mode = "automatic", + automaticOSUpgradePolicy = new + { + disableAutomaticRollback = true, + enableAutomaticOSUpgrade = true, + useRollingUpgradePolicy = true, + osRollingUpgradeDeferral = true, + }, + rollingUpgradePolicy = new + { + enableCrossZoneUpgrade = true, + maxBatchInstancePercent = 1234, + maxUnhealthyInstancePercent = 1234, + maxUnhealthyUpgradedInstancePercent = 1234, + pauseTimeBetweenBatches = "PT1H23M45S", + prioritizeUnhealthyInstances = true, + rollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, + }, + metadata = new object[] + { +null + }, + }, + metadata = new object[] + { +null + }, + }); + Response response = client.UpdateJobSchedule("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_UpdateJobSchedule_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + schedule = new + { + doNotRunUntil = "2022-05-10T18:57:31.2311892Z", + doNotRunAfter = "2022-05-10T18:57:31.2311892Z", + startWindow = "PT1H23M45S", + recurrenceInterval = "PT1H23M45S", + }, + jobSpecification = new + { + priority = 1234, + allowTaskPreemption = true, + maxParallelTasks = 1234, + displayName = "", + usesTaskDependencies = true, + onAllTasksComplete = "noaction", + onTaskFailure = "noaction", + networkConfiguration = new + { + subnetId = "", + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + jobManagerTask = new + { + id = "", + displayName = "", + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + registry = new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + }, + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { +new +{ +autoStorageContainerName = "", +storageContainerUrl = "", +httpUrl = "", +blobPrefix = "", +filePath = "", +fileMode = "", +} + }, + outputFiles = new 
object[] + { +new +{ +filePattern = "", +destination = new +{ +container = new +{ +path = "", +containerUrl = "", +uploadHeaders = new object[] +{ +new +{ +name = "", +value = "", +} +}, +}, +}, +uploadOptions = new +{ +uploadCondition = "tasksuccess", +}, +} + }, + environmentSettings = new object[] + { +new +{ +name = "", +value = "", +} + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + requiredSlots = 1234, + killJobOnCompletion = true, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + runExclusive = true, + applicationPackageReferences = new object[] + { +new +{ +applicationId = "", +version = "", +} + }, + authenticationTokenSettings = new + { + access = new object[] + { +"job" + }, + }, + allowLowPriorityNode = true, + }, + jobPreparationTask = new + { + id = "", + commandLine = "", + resourceFiles = new object[] + { +null + }, + environmentSettings = new object[] + { +null + }, + waitForSuccess = true, + rerunOnNodeRebootAfterSuccess = true, + }, + jobReleaseTask = new + { + id = "", + commandLine = "", + resourceFiles = new object[] + { +null + }, + environmentSettings = new object[] + { +null + }, + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + }, + commonEnvironmentSettings = new object[] + { +null + }, + poolInfo = new + { + poolId = "", + autoPoolSpecification = new + { + autoPoolIdPrefix = "", + poolLifetimeOption = "jobschedule", + keepAlive = true, + pool = new + { + displayName = "", + vmSize = "", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "", + offer = "", + sku = "", + version = "", + virtualMachineImageId = "", + }, + nodeAgentSKUId = "", + windowsConfiguration = new + { + enableAutomaticUpdates = true, + }, + dataDisks = new object[] + { +new +{ +lun = 1234, +caching = "none", +diskSizeGB = 1234, +storageAccountType = "standard_lrs", +} + }, + licenseType = "", + containerConfiguration = new + { + type = "dockerCompatible", + containerImageNames = new object[] + { +"" + }, + containerRegistries = new object[] + { +null + }, + }, + diskEncryptionConfiguration = new + { + targets = new object[] + { +"osdisk" + }, + }, + nodePlacementConfiguration = new + { + policy = "regional", + }, + extensions = new object[] + { +new +{ +name = "", +publisher = "", +type = "", +typeHandlerVersion = "", +autoUpgradeMinorVersion = true, +enableAutomaticUpgrade = true, +settings = new +{ +key = "", +}, +protectedSettings = new +{ +key = "", +}, +provisionAfterExtensions = new object[] +{ +"" +}, +} + }, + osDisk = new + { + ephemeralOSDiskSettings = new + { + placement = "cachedisk", + }, + caching = "none", + diskSizeGB = 1234, + managedDisk = new + { + storageAccountType = "standard_lrs", + }, + writeAcceleratorEnabled = true, + }, + securityProfile = new + { + encryptionAtHost = true, + securityType = "trustedLaunch", + uefiSettings = new + { + secureBootEnabled = true, + vTpmEnabled = true, + }, + }, + serviceArtifactReference = new + { + id = "", + }, + }, + taskSlotsPerNode = 1234, + taskSchedulingPolicy = new + { + nodeFillType = "spread", + }, + resizeTimeout = "PT1H23M45S", + resourceTags = "", + targetDedicatedNodes = 1234, + targetLowPriorityNodes = 1234, + enableAutoScale = true, + autoScaleFormula = "", + autoScaleEvaluationInterval = "PT1H23M45S", + enableInterNodeCommunication = true, + networkConfiguration = new + { + subnetId = "", + dynamicVNetAssignmentScope 
= "none", + endpointConfiguration = new + { + inboundNATPools = new object[] + { +new +{ +name = "", +protocol = "tcp", +backendPort = 1234, +frontendPortRangeStart = 1234, +frontendPortRangeEnd = 1234, +networkSecurityGroupRules = new object[] +{ +new +{ +priority = 1234, +access = "allow", +sourceAddressPrefix = "", +sourcePortRanges = new object[] +{ +"" +}, +} +}, +} + }, + }, + publicIPAddressConfiguration = new + { + provision = "batchmanaged", + ipAddressIds = new object[] + { +"" + }, + }, + enableAcceleratedNetworking = true, + }, + startTask = new + { + commandLine = "", + resourceFiles = new object[] + { +null + }, + environmentSettings = new object[] + { +null + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { +null + }, + userAccounts = new object[] + { +new +{ +name = "", +password = "", +elevationLevel = "nonadmin", +linuxUserConfiguration = new +{ +uid = 1234, +gid = 1234, +sshPrivateKey = "", +}, +windowsUserConfiguration = new +{ +loginMode = "batch", +}, +} + }, + metadata = new object[] + { +new +{ +name = "", +value = "", +} + }, + mountConfiguration = new object[] + { +new +{ +azureBlobFileSystemConfiguration = new +{ +accountName = "", +containerName = "", +accountKey = "", +sasKey = "", +blobfuseOptions = "", +relativeMountPath = "", +}, +nfsMountConfiguration = new +{ +source = "", +relativeMountPath = "", +mountOptions = "", +}, +cifsMountConfiguration = new +{ +username = "", +source = "", +relativeMountPath = "", +mountOptions = "", +password = "", +}, +azureFileShareConfiguration = new +{ +accountName = "", +azureFileUrl = "", +accountKey = "", +relativeMountPath = "", +mountOptions = "", +}, +} + }, + targetNodeCommunicationMode = "default", + upgradePolicy = new + { + mode = "automatic", + automaticOSUpgradePolicy = new + { + disableAutomaticRollback = true, + enableAutomaticOSUpgrade = true, + useRollingUpgradePolicy = true, + osRollingUpgradeDeferral = true, + }, + rollingUpgradePolicy = new + { + enableCrossZoneUpgrade = true, + maxBatchInstancePercent = 1234, + maxUnhealthyInstancePercent = 1234, + maxUnhealthyUpgradedInstancePercent = 1234, + pauseTimeBetweenBatches = "PT1H23M45S", + prioritizeUnhealthyInstances = true, + rollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, + }, + metadata = new object[] + { +null + }, + }, + metadata = new object[] + { +null + }, + }); + Response response = await client.UpdateJobScheduleAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_ReplaceJobSchedule_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + jobSpecification = new + { + poolInfo = new object(), + }, + }); + Response response = client.ReplaceJobSchedule("", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_ReplaceJobSchedule_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + jobSpecification = 
new + { + poolInfo = new object(), + }, + }); + Response response = await client.ReplaceJobScheduleAsync("", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_ReplaceJobSchedule_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchJobSchedule jobSchedule = new BatchJobSchedule(new BatchJobSpecification(new BatchPoolInfo())); + Response response = client.ReplaceJobSchedule("", jobSchedule); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_ReplaceJobSchedule_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchJobSchedule jobSchedule = new BatchJobSchedule(new BatchJobSpecification(new BatchPoolInfo())); + Response response = await client.ReplaceJobScheduleAsync("", jobSchedule); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_ReplaceJobSchedule_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + schedule = new + { + doNotRunUntil = "2022-05-10T18:57:31.2311892Z", + doNotRunAfter = "2022-05-10T18:57:31.2311892Z", + startWindow = "PT1H23M45S", + recurrenceInterval = "PT1H23M45S", + }, + jobSpecification = new + { + priority = 1234, + allowTaskPreemption = true, + maxParallelTasks = 1234, + displayName = "", + usesTaskDependencies = true, + onAllTasksComplete = "noaction", + onTaskFailure = "noaction", + networkConfiguration = new + { + subnetId = "", + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + jobManagerTask = new + { + id = "", + displayName = "", + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + registry = new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + }, + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { +new +{ +autoStorageContainerName = "", +storageContainerUrl = "", +httpUrl = "", +blobPrefix = "", +filePath = "", +fileMode = "", +} + }, + outputFiles = new object[] + { +new +{ +filePattern = "", +destination = new +{ +container = new +{ +path = "", +containerUrl = "", +uploadHeaders = new object[] +{ +new +{ +name = "", +value = "", +} +}, +}, +}, +uploadOptions = new +{ +uploadCondition = "tasksuccess", +}, +} + }, + environmentSettings = new object[] + { +new +{ +name = "", +value = "", +} + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + requiredSlots = 1234, + killJobOnCompletion = true, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + runExclusive = true, + applicationPackageReferences = new object[] + { +new +{ +applicationId = "", +version = "", +} + }, + authenticationTokenSettings = new + { + access = new object[] + { +"job" + }, + }, + allowLowPriorityNode = true, + }, + jobPreparationTask = new + { + id = "", + commandLine = "", + resourceFiles = new 
object[] + { +null + }, + environmentSettings = new object[] + { +null + }, + waitForSuccess = true, + rerunOnNodeRebootAfterSuccess = true, + }, + jobReleaseTask = new + { + id = "", + commandLine = "", + resourceFiles = new object[] + { +null + }, + environmentSettings = new object[] + { +null + }, + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + }, + commonEnvironmentSettings = new object[] + { +null + }, + poolInfo = new + { + poolId = "", + autoPoolSpecification = new + { + autoPoolIdPrefix = "", + poolLifetimeOption = "jobschedule", + keepAlive = true, + pool = new + { + displayName = "", + vmSize = "", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "", + offer = "", + sku = "", + version = "", + virtualMachineImageId = "", + }, + nodeAgentSKUId = "", + windowsConfiguration = new + { + enableAutomaticUpdates = true, + }, + dataDisks = new object[] + { +new +{ +lun = 1234, +caching = "none", +diskSizeGB = 1234, +storageAccountType = "standard_lrs", +} + }, + licenseType = "", + containerConfiguration = new + { + type = "dockerCompatible", + containerImageNames = new object[] + { +"" + }, + containerRegistries = new object[] + { +null + }, + }, + diskEncryptionConfiguration = new + { + targets = new object[] + { +"osdisk" + }, + }, + nodePlacementConfiguration = new + { + policy = "regional", + }, + extensions = new object[] + { +new +{ +name = "", +publisher = "", +type = "", +typeHandlerVersion = "", +autoUpgradeMinorVersion = true, +enableAutomaticUpgrade = true, +settings = new +{ +key = "", +}, +protectedSettings = new +{ +key = "", +}, +provisionAfterExtensions = new object[] +{ +"" +}, +} + }, + osDisk = new + { + ephemeralOSDiskSettings = new + { + placement = "cachedisk", + }, + caching = "none", + diskSizeGB = 1234, + managedDisk = new + { + storageAccountType = "standard_lrs", + }, + writeAcceleratorEnabled = true, + }, + securityProfile = new + { + encryptionAtHost = true, + securityType = "trustedLaunch", + uefiSettings = new + { + secureBootEnabled = true, + vTpmEnabled = true, + }, + }, + serviceArtifactReference = new + { + id = "", + }, + }, + taskSlotsPerNode = 1234, + taskSchedulingPolicy = new + { + nodeFillType = "spread", + }, + resizeTimeout = "PT1H23M45S", + resourceTags = "", + targetDedicatedNodes = 1234, + targetLowPriorityNodes = 1234, + enableAutoScale = true, + autoScaleFormula = "", + autoScaleEvaluationInterval = "PT1H23M45S", + enableInterNodeCommunication = true, + networkConfiguration = new + { + subnetId = "", + dynamicVNetAssignmentScope = "none", + endpointConfiguration = new + { + inboundNATPools = new object[] + { +new +{ +name = "", +protocol = "tcp", +backendPort = 1234, +frontendPortRangeStart = 1234, +frontendPortRangeEnd = 1234, +networkSecurityGroupRules = new object[] +{ +new +{ +priority = 1234, +access = "allow", +sourceAddressPrefix = "", +sourcePortRanges = new object[] +{ +"" +}, +} +}, +} + }, + }, + publicIPAddressConfiguration = new + { + provision = "batchmanaged", + ipAddressIds = new object[] + { +"" + }, + }, + enableAcceleratedNetworking = true, + }, + startTask = new + { + commandLine = "", + resourceFiles = new object[] + { +null + }, + environmentSettings = new object[] + { +null + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { +null + }, + userAccounts = new object[] + { +new +{ +name = "", +password = "", +elevationLevel = "nonadmin", +linuxUserConfiguration = new +{ +uid = 1234, +gid = 1234, 
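+// sshPrivateKey enables key-based SSH between Linux nodes when inter-node communication is enabled on the pool.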
+sshPrivateKey = "", +}, +windowsUserConfiguration = new +{ +loginMode = "batch", +}, +} + }, + metadata = new object[] + { +new +{ +name = "", +value = "", +} + }, + mountConfiguration = new object[] + { +new +{ +azureBlobFileSystemConfiguration = new +{ +accountName = "", +containerName = "", +accountKey = "", +sasKey = "", +blobfuseOptions = "", +relativeMountPath = "", +}, +nfsMountConfiguration = new +{ +source = "", +relativeMountPath = "", +mountOptions = "", +}, +cifsMountConfiguration = new +{ +username = "", +source = "", +relativeMountPath = "", +mountOptions = "", +password = "", +}, +azureFileShareConfiguration = new +{ +accountName = "", +azureFileUrl = "", +accountKey = "", +relativeMountPath = "", +mountOptions = "", +}, +} + }, + targetNodeCommunicationMode = "default", + upgradePolicy = new + { + mode = "automatic", + automaticOSUpgradePolicy = new + { + disableAutomaticRollback = true, + enableAutomaticOSUpgrade = true, + useRollingUpgradePolicy = true, + osRollingUpgradeDeferral = true, + }, + rollingUpgradePolicy = new + { + enableCrossZoneUpgrade = true, + maxBatchInstancePercent = 1234, + maxUnhealthyInstancePercent = 1234, + maxUnhealthyUpgradedInstancePercent = 1234, + pauseTimeBetweenBatches = "PT1H23M45S", + prioritizeUnhealthyInstances = true, + rollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, + }, + metadata = new object[] + { +null + }, + }, + metadata = new object[] + { +null + }, + }); + Response response = client.ReplaceJobSchedule("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_ReplaceJobSchedule_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + schedule = new + { + doNotRunUntil = "2022-05-10T18:57:31.2311892Z", + doNotRunAfter = "2022-05-10T18:57:31.2311892Z", + startWindow = "PT1H23M45S", + recurrenceInterval = "PT1H23M45S", + }, + jobSpecification = new + { + priority = 1234, + allowTaskPreemption = true, + maxParallelTasks = 1234, + displayName = "", + usesTaskDependencies = true, + onAllTasksComplete = "noaction", + onTaskFailure = "noaction", + networkConfiguration = new + { + subnetId = "", + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + jobManagerTask = new + { + id = "", + displayName = "", + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + registry = new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + }, + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { +new +{ +autoStorageContainerName = "", +storageContainerUrl = "", +httpUrl = "", +blobPrefix = "", +filePath = "", +fileMode = "", +} + }, + outputFiles = new object[] + { +new +{ +filePattern = "", +destination = new +{ +container = new +{ +path = "", +containerUrl = "", +uploadHeaders = new object[] +{ +new +{ +name = "", +value = "", +} +}, +}, +}, +uploadOptions = new +{ +uploadCondition = "tasksuccess", +}, +} + }, + environmentSettings = new object[] + { +new +{ +name = "", +value = "", +} + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + 
retentionTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + requiredSlots = 1234, + killJobOnCompletion = true, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + runExclusive = true, + applicationPackageReferences = new object[] + { +new +{ +applicationId = "", +version = "", +} + }, + authenticationTokenSettings = new + { + access = new object[] + { +"job" + }, + }, + allowLowPriorityNode = true, + }, + jobPreparationTask = new + { + id = "", + commandLine = "", + resourceFiles = new object[] + { +null + }, + environmentSettings = new object[] + { +null + }, + waitForSuccess = true, + rerunOnNodeRebootAfterSuccess = true, + }, + jobReleaseTask = new + { + id = "", + commandLine = "", + resourceFiles = new object[] + { +null + }, + environmentSettings = new object[] + { +null + }, + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + }, + commonEnvironmentSettings = new object[] + { +null + }, + poolInfo = new + { + poolId = "", + autoPoolSpecification = new + { + autoPoolIdPrefix = "", + poolLifetimeOption = "jobschedule", + keepAlive = true, + pool = new + { + displayName = "", + vmSize = "", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "", + offer = "", + sku = "", + version = "", + virtualMachineImageId = "", + }, + nodeAgentSKUId = "", + windowsConfiguration = new + { + enableAutomaticUpdates = true, + }, + dataDisks = new object[] + { +new +{ +lun = 1234, +caching = "none", +diskSizeGB = 1234, +storageAccountType = "standard_lrs", +} + }, + licenseType = "", + containerConfiguration = new + { + type = "dockerCompatible", + containerImageNames = new object[] + { +"" + }, + containerRegistries = new object[] + { +null + }, + }, + diskEncryptionConfiguration = new + { + targets = new object[] + { +"osdisk" + }, + }, + nodePlacementConfiguration = new + { + policy = "regional", + }, + extensions = new object[] + { +new +{ +name = "", +publisher = "", +type = "", +typeHandlerVersion = "", +autoUpgradeMinorVersion = true, +enableAutomaticUpgrade = true, +settings = new +{ +key = "", +}, +protectedSettings = new +{ +key = "", +}, +provisionAfterExtensions = new object[] +{ +"" +}, +} + }, + osDisk = new + { + ephemeralOSDiskSettings = new + { + placement = "cachedisk", + }, + caching = "none", + diskSizeGB = 1234, + managedDisk = new + { + storageAccountType = "standard_lrs", + }, + writeAcceleratorEnabled = true, + }, + securityProfile = new + { + encryptionAtHost = true, + securityType = "trustedLaunch", + uefiSettings = new + { + secureBootEnabled = true, + vTpmEnabled = true, + }, + }, + serviceArtifactReference = new + { + id = "", + }, + }, + taskSlotsPerNode = 1234, + taskSchedulingPolicy = new + { + nodeFillType = "spread", + }, + resizeTimeout = "PT1H23M45S", + resourceTags = "", + targetDedicatedNodes = 1234, + targetLowPriorityNodes = 1234, + enableAutoScale = true, + autoScaleFormula = "", + autoScaleEvaluationInterval = "PT1H23M45S", + enableInterNodeCommunication = true, + networkConfiguration = new + { + subnetId = "", + dynamicVNetAssignmentScope = "none", + endpointConfiguration = new + { + inboundNATPools = new object[] + { +new +{ +name = "", +protocol = "tcp", +backendPort = 1234, +frontendPortRangeStart = 1234, +frontendPortRangeEnd = 1234, +networkSecurityGroupRules = new object[] +{ +new +{ +priority = 1234, +access = "allow", +sourceAddressPrefix = "", +sourcePortRanges = new object[] +{ +"" +}, +} +}, +} + }, + }, + 
publicIPAddressConfiguration = new + { + provision = "batchmanaged", + ipAddressIds = new object[] + { +"" + }, + }, + enableAcceleratedNetworking = true, + }, + startTask = new + { + commandLine = "", + resourceFiles = new object[] + { +null + }, + environmentSettings = new object[] + { +null + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { +null + }, + userAccounts = new object[] + { +new +{ +name = "", +password = "", +elevationLevel = "nonadmin", +linuxUserConfiguration = new +{ +uid = 1234, +gid = 1234, +sshPrivateKey = "", +}, +windowsUserConfiguration = new +{ +loginMode = "batch", +}, +} + }, + metadata = new object[] + { +new +{ +name = "", +value = "", +} + }, + mountConfiguration = new object[] + { +new +{ +azureBlobFileSystemConfiguration = new +{ +accountName = "", +containerName = "", +accountKey = "", +sasKey = "", +blobfuseOptions = "", +relativeMountPath = "", +}, +nfsMountConfiguration = new +{ +source = "", +relativeMountPath = "", +mountOptions = "", +}, +cifsMountConfiguration = new +{ +username = "", +source = "", +relativeMountPath = "", +mountOptions = "", +password = "", +}, +azureFileShareConfiguration = new +{ +accountName = "", +azureFileUrl = "", +accountKey = "", +relativeMountPath = "", +mountOptions = "", +}, +} + }, + targetNodeCommunicationMode = "default", + upgradePolicy = new + { + mode = "automatic", + automaticOSUpgradePolicy = new + { + disableAutomaticRollback = true, + enableAutomaticOSUpgrade = true, + useRollingUpgradePolicy = true, + osRollingUpgradeDeferral = true, + }, + rollingUpgradePolicy = new + { + enableCrossZoneUpgrade = true, + maxBatchInstancePercent = 1234, + maxUnhealthyInstancePercent = 1234, + maxUnhealthyUpgradedInstancePercent = 1234, + pauseTimeBetweenBatches = "PT1H23M45S", + prioritizeUnhealthyInstances = true, + rollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, + }, + metadata = new object[] + { +null + }, + }, + metadata = new object[] + { +null + }, + }); + Response response = await client.ReplaceJobScheduleAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_ReplaceJobSchedule_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchJobSchedule jobSchedule = new BatchJobSchedule(new BatchJobSpecification(new BatchPoolInfo + { + PoolId = "", + AutoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.JobSchedule) + { + AutoPoolIdPrefix = "", + KeepAlive = true, + Pool = new BatchPoolSpecification("") + { + DisplayName = "", + VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + { + Publisher = "", + Offer = "", + Sku = "", + Version = "", + VirtualMachineImageId = "", + }, "") + { + WindowsConfiguration = new WindowsConfiguration + { + EnableAutomaticUpdates = true, + }, + DataDisks = {new DataDisk(1234, 1234) +{ +Caching = CachingType.None, +StorageAccountType = StorageAccountType.StandardLRS, +}}, + LicenseType = "", + ContainerConfiguration = new ContainerConfiguration(ContainerType.DockerCompatible) + { + ContainerImageNames = { "" }, + ContainerRegistries = { default }, + }, + DiskEncryptionConfiguration = new DiskEncryptionConfiguration + { 
+ Targets = { DiskEncryptionTarget.OsDisk }, + }, + NodePlacementConfiguration = new BatchNodePlacementConfiguration + { + Policy = BatchNodePlacementPolicyType.Regional, + }, + Extensions = {new VMExtension("", "", "") +{ +TypeHandlerVersion = "", +AutoUpgradeMinorVersion = true, +EnableAutomaticUpgrade = true, +Settings = +{ +["key"] = "" +}, +ProtectedSettings = +{ +["key"] = "" +}, +ProvisionAfterExtensions = {""}, +}}, + OsDisk = new OSDisk + { + EphemeralOSDiskSettings = new DiffDiskSettings + { + Placement = DiffDiskPlacement.CacheDisk, + }, + Caching = CachingType.None, + DiskSizeGB = 1234, + ManagedDisk = new ManagedDisk(StorageAccountType.StandardLRS), + WriteAcceleratorEnabled = true, + }, + SecurityProfile = new SecurityProfile(true, SecurityTypes.TrustedLaunch, new UefiSettings + { + SecureBootEnabled = true, + VTpmEnabled = true, + }), + ServiceArtifactReference = new ServiceArtifactReference(""), + }, + TaskSlotsPerNode = 1234, + TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), + ResizeTimeout = XmlConvert.ToTimeSpan("PT1H23M45S"), + ResourceTags = "", + TargetDedicatedNodes = 1234, + TargetLowPriorityNodes = 1234, + EnableAutoScale = true, + AutoScaleFormula = "", + AutoScaleEvaluationInterval = XmlConvert.ToTimeSpan("PT1H23M45S"), + EnableInterNodeCommunication = true, + NetworkConfiguration = new NetworkConfiguration + { + SubnetId = "", + DynamicVNetAssignmentScope = DynamicVNetAssignmentScope.None, + EndpointConfiguration = new BatchPoolEndpointConfiguration(new InboundNatPool[] + { +new InboundNatPool("", InboundEndpointProtocol.Tcp, 1234, 1234, 1234) +{ +NetworkSecurityGroupRules = {new NetworkSecurityGroupRule(1234, NetworkSecurityGroupRuleAccess.Allow, "") +{ +SourcePortRanges = {""}, +}}, +} + }), + PublicIpAddressConfiguration = new PublicIpAddressConfiguration + { + IpAddressProvisioningType = IpAddressProvisioningType.BatchManaged, + IpAddressIds = { "" }, + }, + EnableAcceleratedNetworking = true, + }, + StartTask = new BatchStartTask("") + { + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + UserIdentity = default, + MaxTaskRetryCount = 1234, + WaitForSuccess = true, + }, + ApplicationPackageReferences = { default }, + UserAccounts = {new UserAccount("", "") +{ +ElevationLevel = ElevationLevel.NonAdmin, +LinuxUserConfiguration = new LinuxUserConfiguration +{ +Uid = 1234, +Gid = 1234, +SshPrivateKey = "", +}, +WindowsUserConfiguration = new WindowsUserConfiguration +{ +LoginMode = LoginMode.Batch, +}, +}}, + Metadata = { new MetadataItem("", "") }, + MountConfiguration = {new MountConfiguration +{ +AzureBlobFileSystemConfiguration = new AzureBlobFileSystemConfiguration("", "", "") +{ +AccountKey = "", +SasKey = "", +BlobfuseOptions = "", +IdentityReference = default, +}, +NfsMountConfiguration = new NfsMountConfiguration("", "") +{ +MountOptions = "", +}, +CifsMountConfiguration = new CifsMountConfiguration("", "", "", "") +{ +MountOptions = "", +}, +AzureFileShareConfiguration = new AzureFileShareConfiguration("", "", "", "") +{ +MountOptions = "", +}, +}}, + TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, + UpgradePolicy = new UpgradePolicy(UpgradeMode.Automatic) + { + AutomaticOsUpgradePolicy = new AutomaticOsUpgradePolicy + { + DisableAutomaticRollback = true, + EnableAutomaticOsUpgrade = true, + UseRollingUpgradePolicy = true, + OsRollingUpgradeDeferral = true, + }, + RollingUpgradePolicy = new RollingUpgradePolicy + { + EnableCrossZoneUpgrade = true, + 
MaxBatchInstancePercent = 1234, + MaxUnhealthyInstancePercent = 1234, + MaxUnhealthyUpgradedInstancePercent = 1234, + PauseTimeBetweenBatches = XmlConvert.ToTimeSpan("PT1H23M45S"), + PrioritizeUnhealthyInstances = true, + RollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, + }) + { + Priority = 1234, + AllowTaskPreemption = true, + MaxParallelTasks = 1234, + DisplayName = "", + UsesTaskDependencies = true, + OnAllTasksComplete = OnAllBatchTasksComplete.NoAction, + OnTaskFailure = OnBatchTaskFailure.NoAction, + NetworkConfiguration = new BatchJobNetworkConfiguration(""), + Constraints = new BatchJobConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + JobManagerTask = new BatchJobManagerTask("", "") + { + DisplayName = "", + ContainerSettings = new BatchTaskContainerSettings("") + { + ContainerRunOptions = "", + Registry = new ContainerRegistryReference + { + Username = "", + Password = "", + RegistryServer = "", + IdentityReference = new BatchNodeIdentityReference + { + ResourceId = "", + }, + }, + WorkingDirectory = ContainerWorkingDirectory.TaskWorkingDirectory, + }, + ResourceFiles = {new ResourceFile +{ +AutoStorageContainerName = "", +StorageContainerUrl = "", +HttpUrl = "", +BlobPrefix = "", +FilePath = "", +FileMode = "", +IdentityReference = default, +}}, + OutputFiles = {new OutputFile("", new OutputFileDestination +{ +Container = new OutputFileBlobContainerDestination("") +{ +Path = "", +IdentityReference = default, +UploadHeaders = {new HttpHeader("") +{ +Value = "", +}}, +}, +}, new OutputFileUploadConfig(OutputFileUploadCondition.TaskSuccess))}, + EnvironmentSettings = {new EnvironmentSetting("") +{ +Value = "", +}}, + Constraints = new BatchTaskConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + RequiredSlots = 1234, + KillJobOnCompletion = true, + UserIdentity = new UserIdentity + { + Username = "", + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, + RunExclusive = true, + ApplicationPackageReferences = {new BatchApplicationPackageReference("") +{ +Version = "", +}}, + AuthenticationTokenSettings = new AuthenticationTokenSettings + { + Access = { AccessScope.Job }, + }, + AllowLowPriorityNode = true, + }, + JobPreparationTask = new BatchJobPreparationTask("") + { + Id = "", + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + Constraints = default, + WaitForSuccess = true, + UserIdentity = default, + RerunOnNodeRebootAfterSuccess = true, + }, + JobReleaseTask = new BatchJobReleaseTask("") + { + Id = "", + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + UserIdentity = default, + }, + CommonEnvironmentSettings = { default }, + Metadata = { default }, + }) + { + Schedule = new BatchJobScheduleConfiguration + { + DoNotRunUntil = DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), + DoNotRunAfter = DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), + StartWindow = XmlConvert.ToTimeSpan("PT1H23M45S"), + RecurrenceInterval = XmlConvert.ToTimeSpan("PT1H23M45S"), + }, + Metadata = { default }, + }; + Response response = client.ReplaceJobSchedule("", jobSchedule, timeOutInSeconds: 1234, ocpdate: 
DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_ReplaceJobSchedule_AllParameters_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchJobSchedule jobSchedule = new BatchJobSchedule(new BatchJobSpecification(new BatchPoolInfo + { + PoolId = "", + AutoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.JobSchedule) + { + AutoPoolIdPrefix = "", + KeepAlive = true, + Pool = new BatchPoolSpecification("") + { + DisplayName = "", + VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + { + Publisher = "", + Offer = "", + Sku = "", + Version = "", + VirtualMachineImageId = "", + }, "") + { + WindowsConfiguration = new WindowsConfiguration + { + EnableAutomaticUpdates = true, + }, + DataDisks = {new DataDisk(1234, 1234) +{ +Caching = CachingType.None, +StorageAccountType = StorageAccountType.StandardLRS, +}}, + LicenseType = "", + ContainerConfiguration = new ContainerConfiguration(ContainerType.DockerCompatible) + { + ContainerImageNames = { "" }, + ContainerRegistries = { default }, + }, + DiskEncryptionConfiguration = new DiskEncryptionConfiguration + { + Targets = { DiskEncryptionTarget.OsDisk }, + }, + NodePlacementConfiguration = new BatchNodePlacementConfiguration + { + Policy = BatchNodePlacementPolicyType.Regional, + }, + Extensions = {new VMExtension("", "", "") +{ +TypeHandlerVersion = "", +AutoUpgradeMinorVersion = true, +EnableAutomaticUpgrade = true, +Settings = +{ +["key"] = "" +}, +ProtectedSettings = +{ +["key"] = "" +}, +ProvisionAfterExtensions = {""}, +}}, + OsDisk = new OSDisk + { + EphemeralOSDiskSettings = new DiffDiskSettings + { + Placement = DiffDiskPlacement.CacheDisk, + }, + Caching = CachingType.None, + DiskSizeGB = 1234, + ManagedDisk = new ManagedDisk(StorageAccountType.StandardLRS), + WriteAcceleratorEnabled = true, + }, + SecurityProfile = new SecurityProfile(true, SecurityTypes.TrustedLaunch, new UefiSettings + { + SecureBootEnabled = true, + VTpmEnabled = true, + }), + ServiceArtifactReference = new ServiceArtifactReference(""), + }, + TaskSlotsPerNode = 1234, + TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), + ResizeTimeout = XmlConvert.ToTimeSpan("PT1H23M45S"), + ResourceTags = "", + TargetDedicatedNodes = 1234, + TargetLowPriorityNodes = 1234, + EnableAutoScale = true, + AutoScaleFormula = "", + AutoScaleEvaluationInterval = XmlConvert.ToTimeSpan("PT1H23M45S"), + EnableInterNodeCommunication = true, + NetworkConfiguration = new NetworkConfiguration + { + SubnetId = "", + DynamicVNetAssignmentScope = DynamicVNetAssignmentScope.None, + EndpointConfiguration = new BatchPoolEndpointConfiguration(new InboundNatPool[] + { +new InboundNatPool("", InboundEndpointProtocol.Tcp, 1234, 1234, 1234) +{ +NetworkSecurityGroupRules = {new NetworkSecurityGroupRule(1234, NetworkSecurityGroupRuleAccess.Allow, "") +{ +SourcePortRanges = {""}, +}}, +} + }), + PublicIpAddressConfiguration = new PublicIpAddressConfiguration + { + IpAddressProvisioningType = IpAddressProvisioningType.BatchManaged, + IpAddressIds = { "" }, + }, + EnableAcceleratedNetworking = true, + }, + StartTask = new BatchStartTask("") + { + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + UserIdentity = default, + 
MaxTaskRetryCount = 1234, + WaitForSuccess = true, + }, + ApplicationPackageReferences = { default }, + UserAccounts = {new UserAccount("", "") +{ +ElevationLevel = ElevationLevel.NonAdmin, +LinuxUserConfiguration = new LinuxUserConfiguration +{ +Uid = 1234, +Gid = 1234, +SshPrivateKey = "", +}, +WindowsUserConfiguration = new WindowsUserConfiguration +{ +LoginMode = LoginMode.Batch, +}, +}}, + Metadata = { new MetadataItem("", "") }, + MountConfiguration = {new MountConfiguration +{ +AzureBlobFileSystemConfiguration = new AzureBlobFileSystemConfiguration("", "", "") +{ +AccountKey = "", +SasKey = "", +BlobfuseOptions = "", +IdentityReference = default, +}, +NfsMountConfiguration = new NfsMountConfiguration("", "") +{ +MountOptions = "", +}, +CifsMountConfiguration = new CifsMountConfiguration("", "", "", "") +{ +MountOptions = "", +}, +AzureFileShareConfiguration = new AzureFileShareConfiguration("", "", "", "") +{ +MountOptions = "", +}, +}}, + TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, + UpgradePolicy = new UpgradePolicy(UpgradeMode.Automatic) + { + AutomaticOsUpgradePolicy = new AutomaticOsUpgradePolicy + { + DisableAutomaticRollback = true, + EnableAutomaticOsUpgrade = true, + UseRollingUpgradePolicy = true, + OsRollingUpgradeDeferral = true, + }, + RollingUpgradePolicy = new RollingUpgradePolicy + { + EnableCrossZoneUpgrade = true, + MaxBatchInstancePercent = 1234, + MaxUnhealthyInstancePercent = 1234, + MaxUnhealthyUpgradedInstancePercent = 1234, + PauseTimeBetweenBatches = XmlConvert.ToTimeSpan("PT1H23M45S"), + PrioritizeUnhealthyInstances = true, + RollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, + }) + { + Priority = 1234, + AllowTaskPreemption = true, + MaxParallelTasks = 1234, + DisplayName = "", + UsesTaskDependencies = true, + OnAllTasksComplete = OnAllBatchTasksComplete.NoAction, + OnTaskFailure = OnBatchTaskFailure.NoAction, + NetworkConfiguration = new BatchJobNetworkConfiguration(""), + Constraints = new BatchJobConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + JobManagerTask = new BatchJobManagerTask("", "") + { + DisplayName = "", + ContainerSettings = new BatchTaskContainerSettings("") + { + ContainerRunOptions = "", + Registry = new ContainerRegistryReference + { + Username = "", + Password = "", + RegistryServer = "", + IdentityReference = new BatchNodeIdentityReference + { + ResourceId = "", + }, + }, + WorkingDirectory = ContainerWorkingDirectory.TaskWorkingDirectory, + }, + ResourceFiles = {new ResourceFile +{ +AutoStorageContainerName = "", +StorageContainerUrl = "", +HttpUrl = "", +BlobPrefix = "", +FilePath = "", +FileMode = "", +IdentityReference = default, +}}, + OutputFiles = {new OutputFile("", new OutputFileDestination +{ +Container = new OutputFileBlobContainerDestination("") +{ +Path = "", +IdentityReference = default, +UploadHeaders = {new HttpHeader("") +{ +Value = "", +}}, +}, +}, new OutputFileUploadConfig(OutputFileUploadCondition.TaskSuccess))}, + EnvironmentSettings = {new EnvironmentSetting("") +{ +Value = "", +}}, + Constraints = new BatchTaskConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + RequiredSlots = 1234, + KillJobOnCompletion = true, + UserIdentity = new UserIdentity + { + Username = "", + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, + RunExclusive 
= true, + ApplicationPackageReferences = {new BatchApplicationPackageReference("") +{ +Version = "", +}}, + AuthenticationTokenSettings = new AuthenticationTokenSettings + { + Access = { AccessScope.Job }, + }, + AllowLowPriorityNode = true, + }, + JobPreparationTask = new BatchJobPreparationTask("") + { + Id = "", + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + Constraints = default, + WaitForSuccess = true, + UserIdentity = default, + RerunOnNodeRebootAfterSuccess = true, + }, + JobReleaseTask = new BatchJobReleaseTask("") + { + Id = "", + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + UserIdentity = default, + }, + CommonEnvironmentSettings = { default }, + Metadata = { default }, + }) + { + Schedule = new BatchJobScheduleConfiguration + { + DoNotRunUntil = DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), + DoNotRunAfter = DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), + StartWindow = XmlConvert.ToTimeSpan("PT1H23M45S"), + RecurrenceInterval = XmlConvert.ToTimeSpan("PT1H23M45S"), + }, + Metadata = { default }, + }; + Response response = await client.ReplaceJobScheduleAsync("", jobSchedule, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_DisableJobSchedule_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.DisableJobSchedule(""); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_DisableJobSchedule_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.DisableJobScheduleAsync(""); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_DisableJobSchedule_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.DisableJobSchedule("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_DisableJobSchedule_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.DisableJobScheduleAsync("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_EnableJobSchedule_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + 
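+ // Pass the job schedule id in place of ""; enabling resumes job creation according to the schedule's recurrence.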
Response response = client.EnableJobSchedule(""); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_EnableJobSchedule_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.EnableJobScheduleAsync(""); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_EnableJobSchedule_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.EnableJobSchedule("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_EnableJobSchedule_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.EnableJobScheduleAsync("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_TerminateJobSchedule_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.TerminateJobSchedule(""); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_TerminateJobSchedule_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.TerminateJobScheduleAsync(""); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_TerminateJobSchedule_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.TerminateJobSchedule("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_TerminateJobSchedule_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.TerminateJobScheduleAsync("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_CreateJobSchedule_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); 
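+ // DefaultAzureCredential probes environment variables, managed identity, and developer sign-ins in turn.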
+ BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + id = "", + schedule = new object(), + jobSpecification = new + { + poolInfo = new object(), + }, + }); + Response response = client.CreateJobSchedule(content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_CreateJobSchedule_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + id = "", + schedule = new object(), + jobSpecification = new + { + poolInfo = new object(), + }, + }); + Response response = await client.CreateJobScheduleAsync(content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_CreateJobSchedule_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchJobScheduleCreateContent jobSchedule = new BatchJobScheduleCreateContent("", new BatchJobScheduleConfiguration(), new BatchJobSpecification(new BatchPoolInfo())); + Response response = client.CreateJobSchedule(jobSchedule); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_CreateJobSchedule_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchJobScheduleCreateContent jobSchedule = new BatchJobScheduleCreateContent("", new BatchJobScheduleConfiguration(), new BatchJobSpecification(new BatchPoolInfo())); + Response response = await client.CreateJobScheduleAsync(jobSchedule); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_CreateJobSchedule_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + id = "", + displayName = "", + schedule = new + { + doNotRunUntil = "2022-05-10T18:57:31.2311892Z", + doNotRunAfter = "2022-05-10T18:57:31.2311892Z", + startWindow = "PT1H23M45S", + recurrenceInterval = "PT1H23M45S", + }, + jobSpecification = new + { + priority = 1234, + allowTaskPreemption = true, + maxParallelTasks = 1234, + displayName = "", + usesTaskDependencies = true, + onAllTasksComplete = "noaction", + onTaskFailure = "noaction", + networkConfiguration = new + { + subnetId = "", + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + jobManagerTask = new + { + id = "", + displayName = "", + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + registry = new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + }, + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { +new +{ +autoStorageContainerName = "", +storageContainerUrl = "", +httpUrl = "", +blobPrefix = "", +filePath = "", +fileMode = "", +} + }, + outputFiles = new object[] + { +new +{ +filePattern = "", +destination = new +{ +container = new 
+{ +path = "", +containerUrl = "", +uploadHeaders = new object[] +{ +new +{ +name = "", +value = "", +} +}, +}, +}, +uploadOptions = new +{ +uploadCondition = "tasksuccess", +}, +} + }, + environmentSettings = new object[] + { +new +{ +name = "", +value = "", +} + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + requiredSlots = 1234, + killJobOnCompletion = true, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + runExclusive = true, + applicationPackageReferences = new object[] + { +new +{ +applicationId = "", +version = "", +} + }, + authenticationTokenSettings = new + { + access = new object[] + { +"job" + }, + }, + allowLowPriorityNode = true, + }, + jobPreparationTask = new + { + id = "", + commandLine = "", + resourceFiles = new object[] + { +null + }, + environmentSettings = new object[] + { +null + }, + waitForSuccess = true, + rerunOnNodeRebootAfterSuccess = true, + }, + jobReleaseTask = new + { + id = "", + commandLine = "", + resourceFiles = new object[] + { +null + }, + environmentSettings = new object[] + { +null + }, + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + }, + commonEnvironmentSettings = new object[] + { +null + }, + poolInfo = new + { + poolId = "", + autoPoolSpecification = new + { + autoPoolIdPrefix = "", + poolLifetimeOption = "jobschedule", + keepAlive = true, + pool = new + { + displayName = "", + vmSize = "", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "", + offer = "", + sku = "", + version = "", + virtualMachineImageId = "", + }, + nodeAgentSKUId = "", + windowsConfiguration = new + { + enableAutomaticUpdates = true, + }, + dataDisks = new object[] + { +new +{ +lun = 1234, +caching = "none", +diskSizeGB = 1234, +storageAccountType = "standard_lrs", +} + }, + licenseType = "", + containerConfiguration = new + { + type = "dockerCompatible", + containerImageNames = new object[] + { +"" + }, + containerRegistries = new object[] + { +null + }, + }, + diskEncryptionConfiguration = new + { + targets = new object[] + { +"osdisk" + }, + }, + nodePlacementConfiguration = new + { + policy = "regional", + }, + extensions = new object[] + { +new +{ +name = "", +publisher = "", +type = "", +typeHandlerVersion = "", +autoUpgradeMinorVersion = true, +enableAutomaticUpgrade = true, +settings = new +{ +key = "", +}, +protectedSettings = new +{ +key = "", +}, +provisionAfterExtensions = new object[] +{ +"" +}, +} + }, + osDisk = new + { + ephemeralOSDiskSettings = new + { + placement = "cachedisk", + }, + caching = "none", + diskSizeGB = 1234, + managedDisk = new + { + storageAccountType = "standard_lrs", + }, + writeAcceleratorEnabled = true, + }, + securityProfile = new + { + encryptionAtHost = true, + securityType = "trustedLaunch", + uefiSettings = new + { + secureBootEnabled = true, + vTpmEnabled = true, + }, + }, + serviceArtifactReference = new + { + id = "", + }, + }, + taskSlotsPerNode = 1234, + taskSchedulingPolicy = new + { + nodeFillType = "spread", + }, + resizeTimeout = "PT1H23M45S", + resourceTags = "", + targetDedicatedNodes = 1234, + targetLowPriorityNodes = 1234, + enableAutoScale = true, + autoScaleFormula = "", + autoScaleEvaluationInterval = "PT1H23M45S", + enableInterNodeCommunication = true, + networkConfiguration = new + { + subnetId = "", + dynamicVNetAssignmentScope = "none", + endpointConfiguration = new + { + inboundNATPools = new object[] + 
{ +new +{ +name = "", +protocol = "tcp", +backendPort = 1234, +frontendPortRangeStart = 1234, +frontendPortRangeEnd = 1234, +networkSecurityGroupRules = new object[] +{ +new +{ +priority = 1234, +access = "allow", +sourceAddressPrefix = "", +sourcePortRanges = new object[] +{ +"" +}, +} +}, +} + }, + }, + publicIPAddressConfiguration = new + { + provision = "batchmanaged", + ipAddressIds = new object[] + { +"" + }, + }, + enableAcceleratedNetworking = true, + }, + startTask = new + { + commandLine = "", + resourceFiles = new object[] + { +null + }, + environmentSettings = new object[] + { +null + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { +null + }, + userAccounts = new object[] + { +new +{ +name = "", +password = "", +elevationLevel = "nonadmin", +linuxUserConfiguration = new +{ +uid = 1234, +gid = 1234, +sshPrivateKey = "", +}, +windowsUserConfiguration = new +{ +loginMode = "batch", +}, +} + }, + metadata = new object[] + { +new +{ +name = "", +value = "", +} + }, + mountConfiguration = new object[] + { +new +{ +azureBlobFileSystemConfiguration = new +{ +accountName = "", +containerName = "", +accountKey = "", +sasKey = "", +blobfuseOptions = "", +relativeMountPath = "", +}, +nfsMountConfiguration = new +{ +source = "", +relativeMountPath = "", +mountOptions = "", +}, +cifsMountConfiguration = new +{ +username = "", +source = "", +relativeMountPath = "", +mountOptions = "", +password = "", +}, +azureFileShareConfiguration = new +{ +accountName = "", +azureFileUrl = "", +accountKey = "", +relativeMountPath = "", +mountOptions = "", +}, +} + }, + targetNodeCommunicationMode = "default", + upgradePolicy = new + { + mode = "automatic", + automaticOSUpgradePolicy = new + { + disableAutomaticRollback = true, + enableAutomaticOSUpgrade = true, + useRollingUpgradePolicy = true, + osRollingUpgradeDeferral = true, + }, + rollingUpgradePolicy = new + { + enableCrossZoneUpgrade = true, + maxBatchInstancePercent = 1234, + maxUnhealthyInstancePercent = 1234, + maxUnhealthyUpgradedInstancePercent = 1234, + pauseTimeBetweenBatches = "PT1H23M45S", + prioritizeUnhealthyInstances = true, + rollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, + }, + metadata = new object[] + { +null + }, + }, + metadata = new object[] + { +null + }, + }); + Response response = client.CreateJobSchedule(content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_CreateJobSchedule_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + id = "", + displayName = "", + schedule = new + { + doNotRunUntil = "2022-05-10T18:57:31.2311892Z", + doNotRunAfter = "2022-05-10T18:57:31.2311892Z", + startWindow = "PT1H23M45S", + recurrenceInterval = "PT1H23M45S", + }, + jobSpecification = new + { + priority = 1234, + allowTaskPreemption = true, + maxParallelTasks = 1234, + displayName = "", + usesTaskDependencies = true, + onAllTasksComplete = "noaction", + onTaskFailure = "noaction", + networkConfiguration = new + { + subnetId = "", + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + jobManagerTask = new + { + id = "", + displayName = "", + 
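+ // The Job Manager task is launched first when the job starts and typically submits the job's remaining tasks.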
commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + registry = new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + }, + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { +new +{ +autoStorageContainerName = "", +storageContainerUrl = "", +httpUrl = "", +blobPrefix = "", +filePath = "", +fileMode = "", +} + }, + outputFiles = new object[] + { +new +{ +filePattern = "", +destination = new +{ +container = new +{ +path = "", +containerUrl = "", +uploadHeaders = new object[] +{ +new +{ +name = "", +value = "", +} +}, +}, +}, +uploadOptions = new +{ +uploadCondition = "tasksuccess", +}, +} + }, + environmentSettings = new object[] + { +new +{ +name = "", +value = "", +} + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + requiredSlots = 1234, + killJobOnCompletion = true, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + runExclusive = true, + applicationPackageReferences = new object[] + { +new +{ +applicationId = "", +version = "", +} + }, + authenticationTokenSettings = new + { + access = new object[] + { +"job" + }, + }, + allowLowPriorityNode = true, + }, + jobPreparationTask = new + { + id = "", + commandLine = "", + resourceFiles = new object[] + { +null + }, + environmentSettings = new object[] + { +null + }, + waitForSuccess = true, + rerunOnNodeRebootAfterSuccess = true, + }, + jobReleaseTask = new + { + id = "", + commandLine = "", + resourceFiles = new object[] + { +null + }, + environmentSettings = new object[] + { +null + }, + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + }, + commonEnvironmentSettings = new object[] + { +null + }, + poolInfo = new + { + poolId = "", + autoPoolSpecification = new + { + autoPoolIdPrefix = "", + poolLifetimeOption = "jobschedule", + keepAlive = true, + pool = new + { + displayName = "", + vmSize = "", + virtualMachineConfiguration = new + { + imageReference = new + { + publisher = "", + offer = "", + sku = "", + version = "", + virtualMachineImageId = "", + }, + nodeAgentSKUId = "", + windowsConfiguration = new + { + enableAutomaticUpdates = true, + }, + dataDisks = new object[] + { +new +{ +lun = 1234, +caching = "none", +diskSizeGB = 1234, +storageAccountType = "standard_lrs", +} + }, + licenseType = "", + containerConfiguration = new + { + type = "dockerCompatible", + containerImageNames = new object[] + { +"" + }, + containerRegistries = new object[] + { +null + }, + }, + diskEncryptionConfiguration = new + { + targets = new object[] + { +"osdisk" + }, + }, + nodePlacementConfiguration = new + { + policy = "regional", + }, + extensions = new object[] + { +new +{ +name = "", +publisher = "", +type = "", +typeHandlerVersion = "", +autoUpgradeMinorVersion = true, +enableAutomaticUpgrade = true, +settings = new +{ +key = "", +}, +protectedSettings = new +{ +key = "", +}, +provisionAfterExtensions = new object[] +{ +"" +}, +} + }, + osDisk = new + { + ephemeralOSDiskSettings = new + { + placement = "cachedisk", + }, + caching = "none", + diskSizeGB = 1234, + managedDisk = new + { + storageAccountType = "standard_lrs", + }, + writeAcceleratorEnabled = true, + }, + securityProfile = new + { + encryptionAtHost = true, + securityType = "trustedLaunch", + uefiSettings = new + { + secureBootEnabled = true, + vTpmEnabled = true, + }, + }, + 
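+ // A service artifact reference keeps all pool nodes on the same image version during automatic OS upgrades.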
serviceArtifactReference = new + { + id = "", + }, + }, + taskSlotsPerNode = 1234, + taskSchedulingPolicy = new + { + nodeFillType = "spread", + }, + resizeTimeout = "PT1H23M45S", + resourceTags = "", + targetDedicatedNodes = 1234, + targetLowPriorityNodes = 1234, + enableAutoScale = true, + autoScaleFormula = "", + autoScaleEvaluationInterval = "PT1H23M45S", + enableInterNodeCommunication = true, + networkConfiguration = new + { + subnetId = "", + dynamicVNetAssignmentScope = "none", + endpointConfiguration = new + { + inboundNATPools = new object[] + { +new +{ +name = "", +protocol = "tcp", +backendPort = 1234, +frontendPortRangeStart = 1234, +frontendPortRangeEnd = 1234, +networkSecurityGroupRules = new object[] +{ +new +{ +priority = 1234, +access = "allow", +sourceAddressPrefix = "", +sourcePortRanges = new object[] +{ +"" +}, +} +}, +} + }, + }, + publicIPAddressConfiguration = new + { + provision = "batchmanaged", + ipAddressIds = new object[] + { +"" + }, + }, + enableAcceleratedNetworking = true, + }, + startTask = new + { + commandLine = "", + resourceFiles = new object[] + { +null + }, + environmentSettings = new object[] + { +null + }, + maxTaskRetryCount = 1234, + waitForSuccess = true, + }, + applicationPackageReferences = new object[] + { +null + }, + userAccounts = new object[] + { +new +{ +name = "", +password = "", +elevationLevel = "nonadmin", +linuxUserConfiguration = new +{ +uid = 1234, +gid = 1234, +sshPrivateKey = "", +}, +windowsUserConfiguration = new +{ +loginMode = "batch", +}, +} + }, + metadata = new object[] + { +new +{ +name = "", +value = "", +} + }, + mountConfiguration = new object[] + { +new +{ +azureBlobFileSystemConfiguration = new +{ +accountName = "", +containerName = "", +accountKey = "", +sasKey = "", +blobfuseOptions = "", +relativeMountPath = "", +}, +nfsMountConfiguration = new +{ +source = "", +relativeMountPath = "", +mountOptions = "", +}, +cifsMountConfiguration = new +{ +username = "", +source = "", +relativeMountPath = "", +mountOptions = "", +password = "", +}, +azureFileShareConfiguration = new +{ +accountName = "", +azureFileUrl = "", +accountKey = "", +relativeMountPath = "", +mountOptions = "", +}, +} + }, + targetNodeCommunicationMode = "default", + upgradePolicy = new + { + mode = "automatic", + automaticOSUpgradePolicy = new + { + disableAutomaticRollback = true, + enableAutomaticOSUpgrade = true, + useRollingUpgradePolicy = true, + osRollingUpgradeDeferral = true, + }, + rollingUpgradePolicy = new + { + enableCrossZoneUpgrade = true, + maxBatchInstancePercent = 1234, + maxUnhealthyInstancePercent = 1234, + maxUnhealthyUpgradedInstancePercent = 1234, + pauseTimeBetweenBatches = "PT1H23M45S", + prioritizeUnhealthyInstances = true, + rollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, + }, + metadata = new object[] + { +null + }, + }, + metadata = new object[] + { +null + }, + }); + Response response = await client.CreateJobScheduleAsync(content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_CreateJobSchedule_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchJobScheduleCreateContent jobSchedule = new BatchJobScheduleCreateContent("", new BatchJobScheduleConfiguration + { + DoNotRunUntil = 
DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), + DoNotRunAfter = DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), + StartWindow = XmlConvert.ToTimeSpan("PT1H23M45S"), + RecurrenceInterval = XmlConvert.ToTimeSpan("PT1H23M45S"), + }, new BatchJobSpecification(new BatchPoolInfo + { + PoolId = "", + AutoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.JobSchedule) + { + AutoPoolIdPrefix = "", + KeepAlive = true, + Pool = new BatchPoolSpecification("") + { + DisplayName = "", + VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + { + Publisher = "", + Offer = "", + Sku = "", + Version = "", + VirtualMachineImageId = "", + }, "") + { + WindowsConfiguration = new WindowsConfiguration + { + EnableAutomaticUpdates = true, + }, + DataDisks = {new DataDisk(1234, 1234) +{ +Caching = CachingType.None, +StorageAccountType = StorageAccountType.StandardLRS, +}}, + LicenseType = "", + ContainerConfiguration = new ContainerConfiguration(ContainerType.DockerCompatible) + { + ContainerImageNames = { "" }, + ContainerRegistries = { default }, + }, + DiskEncryptionConfiguration = new DiskEncryptionConfiguration + { + Targets = { DiskEncryptionTarget.OsDisk }, + }, + NodePlacementConfiguration = new BatchNodePlacementConfiguration + { + Policy = BatchNodePlacementPolicyType.Regional, + }, + Extensions = {new VMExtension("", "", "") +{ +TypeHandlerVersion = "", +AutoUpgradeMinorVersion = true, +EnableAutomaticUpgrade = true, +Settings = +{ +["key"] = "" +}, +ProtectedSettings = +{ +["key"] = "" +}, +ProvisionAfterExtensions = {""}, +}}, + OsDisk = new OSDisk + { + EphemeralOSDiskSettings = new DiffDiskSettings + { + Placement = DiffDiskPlacement.CacheDisk, + }, + Caching = CachingType.None, + DiskSizeGB = 1234, + ManagedDisk = new ManagedDisk(StorageAccountType.StandardLRS), + WriteAcceleratorEnabled = true, + }, + SecurityProfile = new SecurityProfile(true, SecurityTypes.TrustedLaunch, new UefiSettings + { + SecureBootEnabled = true, + VTpmEnabled = true, + }), + ServiceArtifactReference = new ServiceArtifactReference(""), + }, + TaskSlotsPerNode = 1234, + TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), + ResizeTimeout = XmlConvert.ToTimeSpan("PT1H23M45S"), + ResourceTags = "", + TargetDedicatedNodes = 1234, + TargetLowPriorityNodes = 1234, + EnableAutoScale = true, + AutoScaleFormula = "", + AutoScaleEvaluationInterval = XmlConvert.ToTimeSpan("PT1H23M45S"), + EnableInterNodeCommunication = true, + NetworkConfiguration = new NetworkConfiguration + { + SubnetId = "", + DynamicVNetAssignmentScope = DynamicVNetAssignmentScope.None, + EndpointConfiguration = new BatchPoolEndpointConfiguration(new InboundNatPool[] + { +new InboundNatPool("", InboundEndpointProtocol.Tcp, 1234, 1234, 1234) +{ +NetworkSecurityGroupRules = {new NetworkSecurityGroupRule(1234, NetworkSecurityGroupRuleAccess.Allow, "") +{ +SourcePortRanges = {""}, +}}, +} + }), + PublicIpAddressConfiguration = new PublicIpAddressConfiguration + { + IpAddressProvisioningType = IpAddressProvisioningType.BatchManaged, + IpAddressIds = { "" }, + }, + EnableAcceleratedNetworking = true, + }, + StartTask = new BatchStartTask("") + { + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + UserIdentity = default, + MaxTaskRetryCount = 1234, + WaitForSuccess = true, + }, + ApplicationPackageReferences = { default }, + UserAccounts = {new UserAccount("", "") +{ +ElevationLevel = ElevationLevel.NonAdmin, 
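+// NonAdmin creates a standard user on each node; Admin would grant elevated permissions.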
+LinuxUserConfiguration = new LinuxUserConfiguration +{ +Uid = 1234, +Gid = 1234, +SshPrivateKey = "", +}, +WindowsUserConfiguration = new WindowsUserConfiguration +{ +LoginMode = LoginMode.Batch, +}, +}}, + Metadata = { new MetadataItem("", "") }, + MountConfiguration = {new MountConfiguration +{ +AzureBlobFileSystemConfiguration = new AzureBlobFileSystemConfiguration("", "", "") +{ +AccountKey = "", +SasKey = "", +BlobfuseOptions = "", +IdentityReference = default, +}, +NfsMountConfiguration = new NfsMountConfiguration("", "") +{ +MountOptions = "", +}, +CifsMountConfiguration = new CifsMountConfiguration("", "", "", "") +{ +MountOptions = "", +}, +AzureFileShareConfiguration = new AzureFileShareConfiguration("", "", "", "") +{ +MountOptions = "", +}, +}}, + TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, + UpgradePolicy = new UpgradePolicy(UpgradeMode.Automatic) + { + AutomaticOsUpgradePolicy = new AutomaticOsUpgradePolicy + { + DisableAutomaticRollback = true, + EnableAutomaticOsUpgrade = true, + UseRollingUpgradePolicy = true, + OsRollingUpgradeDeferral = true, + }, + RollingUpgradePolicy = new RollingUpgradePolicy + { + EnableCrossZoneUpgrade = true, + MaxBatchInstancePercent = 1234, + MaxUnhealthyInstancePercent = 1234, + MaxUnhealthyUpgradedInstancePercent = 1234, + PauseTimeBetweenBatches = XmlConvert.ToTimeSpan("PT1H23M45S"), + PrioritizeUnhealthyInstances = true, + RollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, + }) + { + Priority = 1234, + AllowTaskPreemption = true, + MaxParallelTasks = 1234, + DisplayName = "", + UsesTaskDependencies = true, + OnAllTasksComplete = OnAllBatchTasksComplete.NoAction, + OnTaskFailure = OnBatchTaskFailure.NoAction, + NetworkConfiguration = new BatchJobNetworkConfiguration(""), + Constraints = new BatchJobConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + JobManagerTask = new BatchJobManagerTask("", "") + { + DisplayName = "", + ContainerSettings = new BatchTaskContainerSettings("") + { + ContainerRunOptions = "", + Registry = new ContainerRegistryReference + { + Username = "", + Password = "", + RegistryServer = "", + IdentityReference = new BatchNodeIdentityReference + { + ResourceId = "", + }, + }, + WorkingDirectory = ContainerWorkingDirectory.TaskWorkingDirectory, + }, + ResourceFiles = {new ResourceFile +{ +AutoStorageContainerName = "", +StorageContainerUrl = "", +HttpUrl = "", +BlobPrefix = "", +FilePath = "", +FileMode = "", +IdentityReference = default, +}}, + OutputFiles = {new OutputFile("", new OutputFileDestination +{ +Container = new OutputFileBlobContainerDestination("") +{ +Path = "", +IdentityReference = default, +UploadHeaders = {new HttpHeader("") +{ +Value = "", +}}, +}, +}, new OutputFileUploadConfig(OutputFileUploadCondition.TaskSuccess))}, + EnvironmentSettings = {new EnvironmentSetting("") +{ +Value = "", +}}, + Constraints = new BatchTaskConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + RequiredSlots = 1234, + KillJobOnCompletion = true, + UserIdentity = new UserIdentity + { + Username = "", + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, + RunExclusive = true, + ApplicationPackageReferences = {new BatchApplicationPackageReference("") +{ +Version = "", +}}, + AuthenticationTokenSettings = new AuthenticationTokenSettings + { + Access = { 
AccessScope.Job }, + }, + AllowLowPriorityNode = true, + }, + JobPreparationTask = new BatchJobPreparationTask("") + { + Id = "", + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + Constraints = default, + WaitForSuccess = true, + UserIdentity = default, + RerunOnNodeRebootAfterSuccess = true, + }, + JobReleaseTask = new BatchJobReleaseTask("") + { + Id = "", + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + UserIdentity = default, + }, + CommonEnvironmentSettings = { default }, + Metadata = { default }, + }) + { + DisplayName = "", + Metadata = { default }, + }; + Response response = client.CreateJobSchedule(jobSchedule, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_CreateJobSchedule_AllParameters_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchJobScheduleCreateContent jobSchedule = new BatchJobScheduleCreateContent("", new BatchJobScheduleConfiguration + { + DoNotRunUntil = DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), + DoNotRunAfter = DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), + StartWindow = XmlConvert.ToTimeSpan("PT1H23M45S"), + RecurrenceInterval = XmlConvert.ToTimeSpan("PT1H23M45S"), + }, new BatchJobSpecification(new BatchPoolInfo + { + PoolId = "", + AutoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.JobSchedule) + { + AutoPoolIdPrefix = "", + KeepAlive = true, + Pool = new BatchPoolSpecification("") + { + DisplayName = "", + VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + { + Publisher = "", + Offer = "", + Sku = "", + Version = "", + VirtualMachineImageId = "", + }, "") + { + WindowsConfiguration = new WindowsConfiguration + { + EnableAutomaticUpdates = true, + }, + DataDisks = {new DataDisk(1234, 1234) +{ +Caching = CachingType.None, +StorageAccountType = StorageAccountType.StandardLRS, +}}, + LicenseType = "", + ContainerConfiguration = new ContainerConfiguration(ContainerType.DockerCompatible) + { + ContainerImageNames = { "" }, + ContainerRegistries = { default }, + }, + DiskEncryptionConfiguration = new DiskEncryptionConfiguration + { + Targets = { DiskEncryptionTarget.OsDisk }, + }, + NodePlacementConfiguration = new BatchNodePlacementConfiguration + { + Policy = BatchNodePlacementPolicyType.Regional, + }, + Extensions = {new VMExtension("", "", "") +{ +TypeHandlerVersion = "", +AutoUpgradeMinorVersion = true, +EnableAutomaticUpgrade = true, +Settings = +{ +["key"] = "" +}, +ProtectedSettings = +{ +["key"] = "" +}, +ProvisionAfterExtensions = {""}, +}}, + OsDisk = new OSDisk + { + EphemeralOSDiskSettings = new DiffDiskSettings + { + Placement = DiffDiskPlacement.CacheDisk, + }, + Caching = CachingType.None, + DiskSizeGB = 1234, + ManagedDisk = new ManagedDisk(StorageAccountType.StandardLRS), + WriteAcceleratorEnabled = true, + }, + SecurityProfile = new SecurityProfile(true, SecurityTypes.TrustedLaunch, new UefiSettings + { + SecureBootEnabled = true, + VTpmEnabled = true, + }), + ServiceArtifactReference = new ServiceArtifactReference(""), + }, + TaskSlotsPerNode = 1234, + TaskSchedulingPolicy = new 
BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), + ResizeTimeout = XmlConvert.ToTimeSpan("PT1H23M45S"), + ResourceTags = "", + TargetDedicatedNodes = 1234, + TargetLowPriorityNodes = 1234, + EnableAutoScale = true, + AutoScaleFormula = "", + AutoScaleEvaluationInterval = XmlConvert.ToTimeSpan("PT1H23M45S"), + EnableInterNodeCommunication = true, + NetworkConfiguration = new NetworkConfiguration + { + SubnetId = "", + DynamicVNetAssignmentScope = DynamicVNetAssignmentScope.None, + EndpointConfiguration = new BatchPoolEndpointConfiguration(new InboundNatPool[] + { +new InboundNatPool("", InboundEndpointProtocol.Tcp, 1234, 1234, 1234) +{ +NetworkSecurityGroupRules = {new NetworkSecurityGroupRule(1234, NetworkSecurityGroupRuleAccess.Allow, "") +{ +SourcePortRanges = {""}, +}}, +} + }), + PublicIpAddressConfiguration = new PublicIpAddressConfiguration + { + IpAddressProvisioningType = IpAddressProvisioningType.BatchManaged, + IpAddressIds = { "" }, + }, + EnableAcceleratedNetworking = true, + }, + StartTask = new BatchStartTask("") + { + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + UserIdentity = default, + MaxTaskRetryCount = 1234, + WaitForSuccess = true, + }, + ApplicationPackageReferences = { default }, + UserAccounts = {new UserAccount("", "") +{ +ElevationLevel = ElevationLevel.NonAdmin, +LinuxUserConfiguration = new LinuxUserConfiguration +{ +Uid = 1234, +Gid = 1234, +SshPrivateKey = "", +}, +WindowsUserConfiguration = new WindowsUserConfiguration +{ +LoginMode = LoginMode.Batch, +}, +}}, + Metadata = { new MetadataItem("", "") }, + MountConfiguration = {new MountConfiguration +{ +AzureBlobFileSystemConfiguration = new AzureBlobFileSystemConfiguration("", "", "") +{ +AccountKey = "", +SasKey = "", +BlobfuseOptions = "", +IdentityReference = default, +}, +NfsMountConfiguration = new NfsMountConfiguration("", "") +{ +MountOptions = "", +}, +CifsMountConfiguration = new CifsMountConfiguration("", "", "", "") +{ +MountOptions = "", +}, +AzureFileShareConfiguration = new AzureFileShareConfiguration("", "", "", "") +{ +MountOptions = "", +}, +}}, + TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, + UpgradePolicy = new UpgradePolicy(UpgradeMode.Automatic) + { + AutomaticOsUpgradePolicy = new AutomaticOsUpgradePolicy + { + DisableAutomaticRollback = true, + EnableAutomaticOsUpgrade = true, + UseRollingUpgradePolicy = true, + OsRollingUpgradeDeferral = true, + }, + RollingUpgradePolicy = new RollingUpgradePolicy + { + EnableCrossZoneUpgrade = true, + MaxBatchInstancePercent = 1234, + MaxUnhealthyInstancePercent = 1234, + MaxUnhealthyUpgradedInstancePercent = 1234, + PauseTimeBetweenBatches = XmlConvert.ToTimeSpan("PT1H23M45S"), + PrioritizeUnhealthyInstances = true, + RollbackFailedInstancesOnPolicyBreach = true, + }, + }, + }, + }, + }) + { + Priority = 1234, + AllowTaskPreemption = true, + MaxParallelTasks = 1234, + DisplayName = "", + UsesTaskDependencies = true, + OnAllTasksComplete = OnAllBatchTasksComplete.NoAction, + OnTaskFailure = OnBatchTaskFailure.NoAction, + NetworkConfiguration = new BatchJobNetworkConfiguration(""), + Constraints = new BatchJobConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + JobManagerTask = new BatchJobManagerTask("", "") + { + DisplayName = "", + ContainerSettings = new BatchTaskContainerSettings("") + { + ContainerRunOptions = "", + Registry = new ContainerRegistryReference + { + Username = "", + Password = "", + RegistryServer = 
"", + IdentityReference = new BatchNodeIdentityReference + { + ResourceId = "", + }, + }, + WorkingDirectory = ContainerWorkingDirectory.TaskWorkingDirectory, + }, + ResourceFiles = {new ResourceFile +{ +AutoStorageContainerName = "", +StorageContainerUrl = "", +HttpUrl = "", +BlobPrefix = "", +FilePath = "", +FileMode = "", +IdentityReference = default, +}}, + OutputFiles = {new OutputFile("", new OutputFileDestination +{ +Container = new OutputFileBlobContainerDestination("") +{ +Path = "", +IdentityReference = default, +UploadHeaders = {new HttpHeader("") +{ +Value = "", +}}, +}, +}, new OutputFileUploadConfig(OutputFileUploadCondition.TaskSuccess))}, + EnvironmentSettings = {new EnvironmentSetting("") +{ +Value = "", +}}, + Constraints = new BatchTaskConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + RequiredSlots = 1234, + KillJobOnCompletion = true, + UserIdentity = new UserIdentity + { + Username = "", + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, + RunExclusive = true, + ApplicationPackageReferences = {new BatchApplicationPackageReference("") +{ +Version = "", +}}, + AuthenticationTokenSettings = new AuthenticationTokenSettings + { + Access = { AccessScope.Job }, + }, + AllowLowPriorityNode = true, + }, + JobPreparationTask = new BatchJobPreparationTask("") + { + Id = "", + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + Constraints = default, + WaitForSuccess = true, + UserIdentity = default, + RerunOnNodeRebootAfterSuccess = true, + }, + JobReleaseTask = new BatchJobReleaseTask("") + { + Id = "", + ContainerSettings = default, + ResourceFiles = { default }, + EnvironmentSettings = { default }, + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + UserIdentity = default, + }, + CommonEnvironmentSettings = { default }, + Metadata = { default }, + }) + { + DisplayName = "", + Metadata = { default }, + }; + Response response = await client.CreateJobScheduleAsync(jobSchedule, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_CreateTask_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + id = "", + commandLine = "", + }); + Response response = client.CreateTask("", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_CreateTask_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + id = "", + commandLine = "", + }); + Response response = await client.CreateTaskAsync("", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_CreateTask_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new 
BatchClient(endpoint, credential); + + BatchTaskCreateContent task = new BatchTaskCreateContent("", ""); + Response response = client.CreateTask("", task); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_CreateTask_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchTaskCreateContent task = new BatchTaskCreateContent("", ""); + Response response = await client.CreateTaskAsync("", task); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_CreateTask_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + id = "", + displayName = "", + exitConditions = new Dictionary<string, object> + { + ["exitCodes"] = new object[] + { +new +{ +code = 1234, +exitOptions = new +{ +jobAction = "none", +dependencyAction = "satisfy", +}, +} + }, + ["exitCodeRanges"] = new object[] + { +new +{ +start = 1234, +end = 1234, +} + }, + ["preProcessingError"] = null, + ["fileUploadError"] = null, + ["default"] = null + }, + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + registry = new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + }, + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { +new +{ +autoStorageContainerName = "", +storageContainerUrl = "", +httpUrl = "", +blobPrefix = "", +filePath = "", +fileMode = "", +} + }, + outputFiles = new object[] + { +new +{ +filePattern = "", +destination = new +{ +container = new +{ +path = "", +containerUrl = "", +uploadHeaders = new object[] +{ +new +{ +name = "", +value = "", +} +}, +}, +}, +uploadOptions = new +{ +uploadCondition = "tasksuccess", +}, +} + }, + environmentSettings = new object[] + { +new +{ +name = "", +value = "", +} + }, + affinityInfo = new + { + affinityId = "", + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + requiredSlots = 1234, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + multiInstanceSettings = new + { + numberOfInstances = 1234, + coordinationCommandLine = "", + commonResourceFiles = new object[] + { +null + }, + }, + dependsOn = new + { + taskIds = new object[] + { +"" + }, + taskIdRanges = new object[] + { +new +{ +start = 1234, +end = 1234, +} + }, + }, + applicationPackageReferences = new object[] + { +new +{ +applicationId = "", +version = "", +} + }, + authenticationTokenSettings = new + { + access = new object[] + { +"job" + }, + }, + }); + Response response = client.CreateTask("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_CreateTask_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + id = "", + displayName = "", + exitConditions = new 
Dictionary<string, object> + { + ["exitCodes"] = new object[] + { +new +{ +code = 1234, +exitOptions = new +{ +jobAction = "none", +dependencyAction = "satisfy", +}, +} + }, + ["exitCodeRanges"] = new object[] + { +new +{ +start = 1234, +end = 1234, +} + }, + ["preProcessingError"] = null, + ["fileUploadError"] = null, + ["default"] = null + }, + commandLine = "", + containerSettings = new + { + containerRunOptions = "", + imageName = "", + registry = new + { + username = "", + password = "", + registryServer = "", + identityReference = new + { + resourceId = "", + }, + }, + workingDirectory = "taskWorkingDirectory", + }, + resourceFiles = new object[] + { +new +{ +autoStorageContainerName = "", +storageContainerUrl = "", +httpUrl = "", +blobPrefix = "", +filePath = "", +fileMode = "", +} + }, + outputFiles = new object[] + { +new +{ +filePattern = "", +destination = new +{ +container = new +{ +path = "", +containerUrl = "", +uploadHeaders = new object[] +{ +new +{ +name = "", +value = "", +} +}, +}, +}, +uploadOptions = new +{ +uploadCondition = "tasksuccess", +}, +} + }, + environmentSettings = new object[] + { +new +{ +name = "", +value = "", +} + }, + affinityInfo = new + { + affinityId = "", + }, + constraints = new + { + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + requiredSlots = 1234, + userIdentity = new + { + username = "", + autoUser = new + { + scope = "task", + elevationLevel = "nonadmin", + }, + }, + multiInstanceSettings = new + { + numberOfInstances = 1234, + coordinationCommandLine = "", + commonResourceFiles = new object[] + { +null + }, + }, + dependsOn = new + { + taskIds = new object[] + { +"" + }, + taskIdRanges = new object[] + { +new +{ +start = 1234, +end = 1234, +} + }, + }, + applicationPackageReferences = new object[] + { +new +{ +applicationId = "", +version = "", +} + }, + authenticationTokenSettings = new + { + access = new object[] + { +"job" + }, + }, + }); + Response response = await client.CreateTaskAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_CreateTask_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchTaskCreateContent task = new BatchTaskCreateContent("", "") + { + DisplayName = "", + ExitConditions = new ExitConditions + { + ExitCodes = {new ExitCodeMapping(1234, new ExitOptions +{ +JobAction = BatchJobAction.None, +DependencyAction = DependencyAction.Satisfy, +})}, + ExitCodeRanges = { new ExitCodeRangeMapping(1234, 1234, default) }, + PreProcessingError = default, + FileUploadError = default, + Default = default, + }, + ContainerSettings = new BatchTaskContainerSettings("") + { + ContainerRunOptions = "", + Registry = new ContainerRegistryReference + { + Username = "", + Password = "", + RegistryServer = "", + IdentityReference = new BatchNodeIdentityReference + { + ResourceId = "", + }, + }, + WorkingDirectory = ContainerWorkingDirectory.TaskWorkingDirectory, + }, + ResourceFiles = {new ResourceFile +{ +AutoStorageContainerName = "", +StorageContainerUrl = "", +HttpUrl = "", +BlobPrefix = "", +FilePath = "", +FileMode = "", +IdentityReference = default, +}}, + OutputFiles = {new OutputFile("", new OutputFileDestination +{ +Container = new 
OutputFileBlobContainerDestination("") +{ +Path = "", +IdentityReference = default, +UploadHeaders = {new HttpHeader("") +{ +Value = "", +}}, +}, +}, new OutputFileUploadConfig(OutputFileUploadCondition.TaskSuccess))}, + EnvironmentSettings = {new EnvironmentSetting("") +{ +Value = "", +}}, + AffinityInfo = new AffinityInfo(""), + Constraints = new BatchTaskConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + RequiredSlots = 1234, + UserIdentity = new UserIdentity + { + Username = "", + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, + MultiInstanceSettings = new MultiInstanceSettings("") + { + NumberOfInstances = 1234, + CommonResourceFiles = { default }, + }, + DependsOn = new BatchTaskDependencies + { + TaskIds = { "" }, + TaskIdRanges = { new BatchTaskIdRange(1234, 1234) }, + }, + ApplicationPackageReferences = {new BatchApplicationPackageReference("") +{ +Version = "", +}}, + AuthenticationTokenSettings = new AuthenticationTokenSettings + { + Access = { AccessScope.Job }, + }, + }; + Response response = client.CreateTask("", task, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_CreateTask_AllParameters_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchTaskCreateContent task = new BatchTaskCreateContent("", "") + { + DisplayName = "", + ExitConditions = new ExitConditions + { + ExitCodes = {new ExitCodeMapping(1234, new ExitOptions +{ +JobAction = BatchJobAction.None, +DependencyAction = DependencyAction.Satisfy, +})}, + ExitCodeRanges = { new ExitCodeRangeMapping(1234, 1234, default) }, + PreProcessingError = default, + FileUploadError = default, + Default = default, + }, + ContainerSettings = new BatchTaskContainerSettings("") + { + ContainerRunOptions = "", + Registry = new ContainerRegistryReference + { + Username = "", + Password = "", + RegistryServer = "", + IdentityReference = new BatchNodeIdentityReference + { + ResourceId = "", + }, + }, + WorkingDirectory = ContainerWorkingDirectory.TaskWorkingDirectory, + }, + ResourceFiles = {new ResourceFile +{ +AutoStorageContainerName = "", +StorageContainerUrl = "", +HttpUrl = "", +BlobPrefix = "", +FilePath = "", +FileMode = "", +IdentityReference = default, +}}, + OutputFiles = {new OutputFile("", new OutputFileDestination +{ +Container = new OutputFileBlobContainerDestination("") +{ +Path = "", +IdentityReference = default, +UploadHeaders = {new HttpHeader("") +{ +Value = "", +}}, +}, +}, new OutputFileUploadConfig(OutputFileUploadCondition.TaskSuccess))}, + EnvironmentSettings = {new EnvironmentSetting("") +{ +Value = "", +}}, + AffinityInfo = new AffinityInfo(""), + Constraints = new BatchTaskConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + RequiredSlots = 1234, + UserIdentity = new UserIdentity + { + Username = "", + AutoUser = new AutoUserSpecification + { + Scope = AutoUserScope.Task, + ElevationLevel = ElevationLevel.NonAdmin, + }, + }, + MultiInstanceSettings = new MultiInstanceSettings("") + { + NumberOfInstances = 1234, + CommonResourceFiles = { 
default }, + }, + DependsOn = new BatchTaskDependencies + { + TaskIds = { "" }, + TaskIdRanges = { new BatchTaskIdRange(1234, 1234) }, + }, + ApplicationPackageReferences = {new BatchApplicationPackageReference("") +{ +Version = "", +}}, + AuthenticationTokenSettings = new AuthenticationTokenSettings + { + Access = { AccessScope.Job }, + }, + }; + Response response = await client.CreateTaskAsync("", task, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_CreateTaskCollection_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + value = new object[] + { +new +{ +id = "", +commandLine = "", +} + }, + }); + Response response = client.CreateTaskCollection("", content); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_CreateTaskCollection_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + value = new object[] + { +new +{ +id = "", +commandLine = "", +} + }, + }); + Response response = await client.CreateTaskCollectionAsync("", content); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_CreateTaskCollection_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchTaskGroup taskCollection = new BatchTaskGroup(new BatchTaskCreateContent[] + { +new BatchTaskCreateContent("", "") + }); + Response response = client.CreateTaskCollection("", taskCollection); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_CreateTaskCollection_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchTaskGroup taskCollection = new BatchTaskGroup(new BatchTaskCreateContent[] + { +new BatchTaskCreateContent("", "") + }); + Response response = await client.CreateTaskCollectionAsync("", taskCollection); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_CreateTaskCollection_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + value = new object[] + { +new +{ +id = "", +displayName = "", +exitConditions = new Dictionary<string, object> +{ +["exitCodes"] = new object[] +{ +new +{ +code = 1234, +exitOptions = new +{ +jobAction = "none", +dependencyAction = "satisfy", +}, +} +}, +["exitCodeRanges"] = new object[] +{ +new +{ +start = 1234, +end = 1234, +} +}, +["preProcessingError"] = null, +["fileUploadError"] = null, +["default"] = null +},
+commandLine = "", +containerSettings = new +{ +containerRunOptions = "", +imageName = "", +registry = new +{ +username = "", +password = "", +registryServer = "", +identityReference = new +{ +resourceId = "", +}, +}, +workingDirectory = "taskWorkingDirectory", +}, +resourceFiles = new object[] +{ +new +{ +autoStorageContainerName = "", +storageContainerUrl = "", +httpUrl = "", +blobPrefix = "", +filePath = "", +fileMode = "", +} +}, +outputFiles = new object[] +{ +new +{ +filePattern = "", +destination = new +{ +container = new +{ +path = "", +containerUrl = "", +uploadHeaders = new object[] +{ +new +{ +name = "", +value = "", +} +}, +}, +}, +uploadOptions = new +{ +uploadCondition = "tasksuccess", +}, +} +}, +environmentSettings = new object[] +{ +new +{ +name = "", +value = "", +} +}, +affinityInfo = new +{ +affinityId = "", +}, +constraints = new +{ +maxWallClockTime = "PT1H23M45S", +retentionTime = "PT1H23M45S", +maxTaskRetryCount = 1234, +}, +requiredSlots = 1234, +userIdentity = new +{ +username = "", +autoUser = new +{ +scope = "task", +elevationLevel = "nonadmin", +}, +}, +multiInstanceSettings = new +{ +numberOfInstances = 1234, +coordinationCommandLine = "", +commonResourceFiles = new object[] +{ +null +}, +}, +dependsOn = new +{ +taskIds = new object[] +{ +"" +}, +taskIdRanges = new object[] +{ +new +{ +start = 1234, +end = 1234, +} +}, +}, +applicationPackageReferences = new object[] +{ +new +{ +applicationId = "", +version = "", +} +}, +authenticationTokenSettings = new +{ +access = new object[] +{ +"job" +}, +}, +} + }, + }); + Response response = client.CreateTaskCollection("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.GetProperty("value")[0].GetProperty("status").ToString()); + Console.WriteLine(result.GetProperty("value")[0].GetProperty("taskId").ToString()); + Console.WriteLine(result.GetProperty("value")[0].GetProperty("eTag").ToString()); + Console.WriteLine(result.GetProperty("value")[0].GetProperty("lastModified").ToString()); + Console.WriteLine(result.GetProperty("value")[0].GetProperty("location").ToString()); + Console.WriteLine(result.GetProperty("value")[0].GetProperty("error").GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("value")[0].GetProperty("error").GetProperty("message").GetProperty("lang").ToString()); + Console.WriteLine(result.GetProperty("value")[0].GetProperty("error").GetProperty("message").GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("value")[0].GetProperty("error").GetProperty("values")[0].GetProperty("key").ToString()); + Console.WriteLine(result.GetProperty("value")[0].GetProperty("error").GetProperty("values")[0].GetProperty("value").ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_CreateTaskCollection_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + value = new object[] + { +new +{ +id = "", +displayName = "", +exitConditions = new Dictionary<string, object> +{ +["exitCodes"] = new object[] +{ +new +{ +code = 1234, +exitOptions = new +{ +jobAction = "none", +dependencyAction = "satisfy", +}, +} +}, +["exitCodeRanges"] = new object[] +{ +new +{ +start = 1234, +end = 1234, +} +},
+["preProcessingError"] = null, +["fileUploadError"] = null, +["default"] = null +}, +commandLine = "", +containerSettings = new +{ +containerRunOptions = "", +imageName = "", +registry = new +{ +username = "", +password = "", +registryServer = "", +identityReference = new +{ +resourceId = "", +}, +}, +workingDirectory = "taskWorkingDirectory", +}, +resourceFiles = new object[] +{ +new +{ +autoStorageContainerName = "", +storageContainerUrl = "", +httpUrl = "", +blobPrefix = "", +filePath = "", +fileMode = "", +} +}, +outputFiles = new object[] +{ +new +{ +filePattern = "", +destination = new +{ +container = new +{ +path = "", +containerUrl = "", +uploadHeaders = new object[] +{ +new +{ +name = "", +value = "", +} +}, +}, +}, +uploadOptions = new +{ +uploadCondition = "tasksuccess", +}, +} +}, +environmentSettings = new object[] +{ +new +{ +name = "", +value = "", +} +}, +affinityInfo = new +{ +affinityId = "", +}, +constraints = new +{ +maxWallClockTime = "PT1H23M45S", +retentionTime = "PT1H23M45S", +maxTaskRetryCount = 1234, +}, +requiredSlots = 1234, +userIdentity = new +{ +username = "", +autoUser = new +{ +scope = "task", +elevationLevel = "nonadmin", +}, +}, +multiInstanceSettings = new +{ +numberOfInstances = 1234, +coordinationCommandLine = "", +commonResourceFiles = new object[] +{ +null +}, +}, +dependsOn = new +{ +taskIds = new object[] +{ +"" +}, +taskIdRanges = new object[] +{ +new +{ +start = 1234, +end = 1234, +} +}, +}, +applicationPackageReferences = new object[] +{ +new +{ +applicationId = "", +version = "", +} +}, +authenticationTokenSettings = new +{ +access = new object[] +{ +"job" +}, +}, +} + }, + }); + Response response = await client.CreateTaskCollectionAsync("", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.GetProperty("value")[0].GetProperty("status").ToString()); + Console.WriteLine(result.GetProperty("value")[0].GetProperty("taskId").ToString()); + Console.WriteLine(result.GetProperty("value")[0].GetProperty("eTag").ToString()); + Console.WriteLine(result.GetProperty("value")[0].GetProperty("lastModified").ToString()); + Console.WriteLine(result.GetProperty("value")[0].GetProperty("location").ToString()); + Console.WriteLine(result.GetProperty("value")[0].GetProperty("error").GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("value")[0].GetProperty("error").GetProperty("message").GetProperty("lang").ToString()); + Console.WriteLine(result.GetProperty("value")[0].GetProperty("error").GetProperty("message").GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("value")[0].GetProperty("error").GetProperty("values")[0].GetProperty("key").ToString()); + Console.WriteLine(result.GetProperty("value")[0].GetProperty("error").GetProperty("values")[0].GetProperty("value").ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_CreateTaskCollection_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchTaskGroup taskCollection = new BatchTaskGroup(new BatchTaskCreateContent[] + { +new BatchTaskCreateContent("", "") +{ +DisplayName = "", +ExitConditions = new ExitConditions +{ +ExitCodes = {new ExitCodeMapping(1234, new ExitOptions +{ +JobAction = BatchJobAction.None, +DependencyAction 
= DependencyAction.Satisfy, +})}, +ExitCodeRanges = {new ExitCodeRangeMapping(1234, 1234, default)}, +PreProcessingError = default, +FileUploadError = default, +Default = default, +}, +ContainerSettings = new BatchTaskContainerSettings("") +{ +ContainerRunOptions = "", +Registry = new ContainerRegistryReference +{ +Username = "", +Password = "", +RegistryServer = "", +IdentityReference = new BatchNodeIdentityReference +{ +ResourceId = "", +}, +}, +WorkingDirectory = ContainerWorkingDirectory.TaskWorkingDirectory, +}, +ResourceFiles = {new ResourceFile +{ +AutoStorageContainerName = "", +StorageContainerUrl = "", +HttpUrl = "", +BlobPrefix = "", +FilePath = "", +FileMode = "", +IdentityReference = default, +}}, +OutputFiles = {new OutputFile("", new OutputFileDestination +{ +Container = new OutputFileBlobContainerDestination("") +{ +Path = "", +IdentityReference = default, +UploadHeaders = {new HttpHeader("") +{ +Value = "", +}}, +}, +}, new OutputFileUploadConfig(OutputFileUploadCondition.TaskSuccess))}, +EnvironmentSettings = {new EnvironmentSetting("") +{ +Value = "", +}}, +AffinityInfo = new AffinityInfo(""), +Constraints = new BatchTaskConstraints +{ +MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), +RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), +MaxTaskRetryCount = 1234, +}, +RequiredSlots = 1234, +UserIdentity = new UserIdentity +{ +Username = "", +AutoUser = new AutoUserSpecification +{ +Scope = AutoUserScope.Task, +ElevationLevel = ElevationLevel.NonAdmin, +}, +}, +MultiInstanceSettings = new MultiInstanceSettings("") +{ +NumberOfInstances = 1234, +CommonResourceFiles = {default}, +}, +DependsOn = new BatchTaskDependencies +{ +TaskIds = {""}, +TaskIdRanges = {new BatchTaskIdRange(1234, 1234)}, +}, +ApplicationPackageReferences = {new BatchApplicationPackageReference("") +{ +Version = "", +}}, +AuthenticationTokenSettings = new AuthenticationTokenSettings +{ +Access = {AccessScope.Job}, +}, +} + }); + Response response = client.CreateTaskCollection("", taskCollection, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_CreateTaskCollection_AllParameters_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchTaskGroup taskCollection = new BatchTaskGroup(new BatchTaskCreateContent[] + { +new BatchTaskCreateContent("", "") +{ +DisplayName = "", +ExitConditions = new ExitConditions +{ +ExitCodes = {new ExitCodeMapping(1234, new ExitOptions +{ +JobAction = BatchJobAction.None, +DependencyAction = DependencyAction.Satisfy, +})}, +ExitCodeRanges = {new ExitCodeRangeMapping(1234, 1234, default)}, +PreProcessingError = default, +FileUploadError = default, +Default = default, +}, +ContainerSettings = new BatchTaskContainerSettings("") +{ +ContainerRunOptions = "", +Registry = new ContainerRegistryReference +{ +Username = "", +Password = "", +RegistryServer = "", +IdentityReference = new BatchNodeIdentityReference +{ +ResourceId = "", +}, +}, +WorkingDirectory = ContainerWorkingDirectory.TaskWorkingDirectory, +}, +ResourceFiles = {new ResourceFile +{ +AutoStorageContainerName = "", +StorageContainerUrl = "", +HttpUrl = "", +BlobPrefix = "", +FilePath = "", +FileMode = "", +IdentityReference = default, +}}, +OutputFiles = {new OutputFile("", new OutputFileDestination +{ +Container = new 
OutputFileBlobContainerDestination("") +{ +Path = "", +IdentityReference = default, +UploadHeaders = {new HttpHeader("") +{ +Value = "", +}}, +}, +}, new OutputFileUploadConfig(OutputFileUploadCondition.TaskSuccess))}, +EnvironmentSettings = {new EnvironmentSetting("") +{ +Value = "", +}}, +AffinityInfo = new AffinityInfo(""), +Constraints = new BatchTaskConstraints +{ +MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), +RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), +MaxTaskRetryCount = 1234, +}, +RequiredSlots = 1234, +UserIdentity = new UserIdentity +{ +Username = "", +AutoUser = new AutoUserSpecification +{ +Scope = AutoUserScope.Task, +ElevationLevel = ElevationLevel.NonAdmin, +}, +}, +MultiInstanceSettings = new MultiInstanceSettings("") +{ +NumberOfInstances = 1234, +CommonResourceFiles = {default}, +}, +DependsOn = new BatchTaskDependencies +{ +TaskIds = {""}, +TaskIdRanges = {new BatchTaskIdRange(1234, 1234)}, +}, +ApplicationPackageReferences = {new BatchApplicationPackageReference("") +{ +Version = "", +}}, +AuthenticationTokenSettings = new AuthenticationTokenSettings +{ +Access = {AccessScope.Job}, +}, +} + }); + Response response = await client.CreateTaskCollectionAsync("", taskCollection, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_DeleteTask_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.DeleteTask("", ""); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_DeleteTask_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.DeleteTaskAsync("", ""); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_DeleteTask_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.DeleteTask("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_DeleteTask_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.DeleteTaskAsync("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetTask_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetTask("", "", null, null, null, null, null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + 
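+ // The protocol overload above returns the raw HTTP payload, so JsonDocument is one way to read its properties back; the *_Convenience overloads elsewhere in this file bind the same response to typed models (for example BatchTask) instead.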
Console.WriteLine(result.ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetTask_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetTaskAsync("", "", null, null, null, null, null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetTask_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetTask("", ""); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetTask_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetTaskAsync("", ""); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetTask_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetTask("", "", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), new string[] { "" }, new string[] { "" }, null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("eTag").ToString()); + Console.WriteLine(result.GetProperty("lastModified").ToString()); + Console.WriteLine(result.GetProperty("creationTime").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("exitCodes")[0].GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("exitCodes")[0].GetProperty("exitOptions").GetProperty("jobAction").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("exitCodes")[0].GetProperty("exitOptions").GetProperty("dependencyAction").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("exitCodeRanges")[0].GetProperty("start").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("exitCodeRanges")[0].GetProperty("end").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("exitCodeRanges")[0].GetProperty("exitOptions").GetProperty("jobAction").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("exitCodeRanges")[0].GetProperty("exitOptions").GetProperty("dependencyAction").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("preProcessingError").GetProperty("jobAction").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("preProcessingError").GetProperty("dependencyAction").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("fileUploadError").GetProperty("jobAction").ToString()); + 
Console.WriteLine(result.GetProperty("exitConditions").GetProperty("fileUploadError").GetProperty("dependencyAction").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("default").GetProperty("jobAction").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("default").GetProperty("dependencyAction").ToString()); + Console.WriteLine(result.GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("stateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("previousState").ToString()); + Console.WriteLine(result.GetProperty("previousStateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("outputFiles")[0].GetProperty("filePattern").ToString()); + Console.WriteLine(result.GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("path").ToString()); + Console.WriteLine(result.GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("containerUrl").ToString()); + Console.WriteLine(result.GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("uploadHeaders")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("uploadHeaders")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("outputFiles")[0].GetProperty("uploadOptions").GetProperty("uploadCondition").ToString()); + Console.WriteLine(result.GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + 
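+ // The properties printed from here on (executionInfo, nodeInfo, stats) are populated by the service once the task has been scheduled and run, so they may be absent on a freshly created task.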
Console.WriteLine(result.GetProperty("affinityInfo").GetProperty("affinityId").ToString()); + Console.WriteLine(result.GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("constraints").GetProperty("retentionTime").ToString()); + Console.WriteLine(result.GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("requiredSlots").ToString()); + Console.WriteLine(result.GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("endTime").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("exitCode").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("containerInfo").GetProperty("containerId").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("containerInfo").GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("containerInfo").GetProperty("error").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("category").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("details")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("details")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("retryCount").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("lastRetryTime").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("requeueCount").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("lastRequeueTime").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("result").ToString()); + Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("affinityId").ToString()); + Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("nodeUrl").ToString()); + Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("poolId").ToString()); + Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("nodeId").ToString()); + Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("taskRootDirectory").ToString()); + Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("taskRootDirectoryUrl").ToString()); + Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("numberOfInstances").ToString()); + Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("coordinationCommandLine").ToString()); + Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("commonResourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + 
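+ // The select and expand arguments in the GetTask call above control how much of this payload the service returns; the empty strings only keep the sample compilable, and a real request would pass property names (for instance "stats", assuming the service's $expand support) instead.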
Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("commonResourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("commonResourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("commonResourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("commonResourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("commonResourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("commonResourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("lastUpdateTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("userCPUTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("kernelCPUTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("wallClockTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("readIOps").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("writeIOps").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("readIOGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("writeIOGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("waitTime").ToString()); + Console.WriteLine(result.GetProperty("dependsOn").GetProperty("taskIds")[0].ToString()); + Console.WriteLine(result.GetProperty("dependsOn").GetProperty("taskIdRanges")[0].GetProperty("start").ToString()); + Console.WriteLine(result.GetProperty("dependsOn").GetProperty("taskIdRanges")[0].GetProperty("end").ToString()); + Console.WriteLine(result.GetProperty("applicationPackageReferences")[0].GetProperty("applicationId").ToString()); + Console.WriteLine(result.GetProperty("applicationPackageReferences")[0].GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("authenticationTokenSettings").GetProperty("access")[0].ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetTask_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetTask("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), select: new string[] { "" }, expand: new string[] { "" }, requestConditions: null); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_ReplaceTask_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new object()); + Response response = client.ReplaceTask("", "", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task 
Example_Batch_ReplaceTask_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new object()); + Response response = await client.ReplaceTaskAsync("", "", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_ReplaceTask_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchTask task = new BatchTask(); + Response response = client.ReplaceTask("", "", task); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_ReplaceTask_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchTask task = new BatchTask(); + Response response = await client.ReplaceTaskAsync("", "", task); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_ReplaceTask_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + constraints = new + { + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + }); + Response response = client.ReplaceTask("", "", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_ReplaceTask_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + constraints = new + { + maxWallClockTime = "PT1H23M45S", + retentionTime = "PT1H23M45S", + maxTaskRetryCount = 1234, + }, + }); + Response response = await client.ReplaceTaskAsync("", "", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_ReplaceTask_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchTask task = new BatchTask + { + Constraints = new BatchTaskConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + }; + Response response = client.ReplaceTask("", "", task, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_ReplaceTask_AllParameters_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new 
BatchClient(endpoint, credential); + + BatchTask task = new BatchTask + { + Constraints = new BatchTaskConstraints + { + MaxWallClockTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + RetentionTime = XmlConvert.ToTimeSpan("PT1H23M45S"), + MaxTaskRetryCount = 1234, + }, + }; + Response response = await client.ReplaceTaskAsync("", "", task, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_TerminateTask_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.TerminateTask("", ""); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_TerminateTask_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.TerminateTaskAsync("", ""); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_TerminateTask_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.TerminateTask("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_TerminateTask_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.TerminateTaskAsync("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_ReactivateTask_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.ReactivateTask("", ""); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_ReactivateTask_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.ReactivateTaskAsync("", ""); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_ReactivateTask_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.ReactivateTask("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating 
compilation of examples")] + public async Task Example_Batch_ReactivateTask_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.ReactivateTaskAsync("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), requestConditions: null); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_DeleteTaskFile_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.DeleteTaskFile("", "", ""); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_DeleteTaskFile_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.DeleteTaskFileAsync("", "", ""); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_DeleteTaskFile_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.DeleteTaskFile("", "", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), recursive: true); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_DeleteTaskFile_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.DeleteTaskFileAsync("", "", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), recursive: true); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetTaskFile_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetTaskFile("", "", "", null, null, null, null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetTaskFile_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetTaskFileAsync("", "", "", null, null, null, null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetTaskFile_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, 
credential); + + Response<BinaryData> response = client.GetTaskFile("", "", ""); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetTaskFile_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response<BinaryData> response = await client.GetTaskFileAsync("", "", ""); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetTaskFile_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetTaskFile("", "", "", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), "", null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetTaskFile_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetTaskFileAsync("", "", "", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), "", null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetTaskFile_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response<BinaryData> response = client.GetTaskFile("", "", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), ocpRange: "", requestConditions: null); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetTaskFile_AllParameters_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response<BinaryData> response = await client.GetTaskFileAsync("", "", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), ocpRange: "", requestConditions: null); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_CreateNodeUser_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + name = "", + }); + Response response = client.CreateNodeUser("", "", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_CreateNodeUser_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + name = "", + }); + Response response = await client.CreateNodeUserAsync("", "", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public 
void Example_Batch_CreateNodeUser_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchNodeUserCreateContent user = new BatchNodeUserCreateContent(""); + Response response = client.CreateNodeUser("", "", user); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_CreateNodeUser_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchNodeUserCreateContent user = new BatchNodeUserCreateContent(""); + Response response = await client.CreateNodeUserAsync("", "", user); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_CreateNodeUser_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + name = "", + isAdmin = true, + expiryTime = "2022-05-10T18:57:31.2311892Z", + password = "", + sshPublicKey = "", + }); + Response response = client.CreateNodeUser("", "", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_CreateNodeUser_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + name = "", + isAdmin = true, + expiryTime = "2022-05-10T18:57:31.2311892Z", + password = "", + sshPublicKey = "", + }); + Response response = await client.CreateNodeUserAsync("", "", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_CreateNodeUser_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchNodeUserCreateContent user = new BatchNodeUserCreateContent("") + { + IsAdmin = true, + ExpiryTime = DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), + Password = "", + SshPublicKey = "", + }; + Response response = client.CreateNodeUser("", "", user, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_CreateNodeUser_AllParameters_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchNodeUserCreateContent user = new BatchNodeUserCreateContent("") + { + IsAdmin = true, + ExpiryTime = DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), + Password = "", + SshPublicKey = "", + }; + Response response = await client.CreateNodeUserAsync("", "", user, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public 
void Example_Batch_DeleteNodeUser_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.DeleteNodeUser("", "", ""); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_DeleteNodeUser_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.DeleteNodeUserAsync("", "", ""); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_DeleteNodeUser_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.DeleteNodeUser("", "", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_DeleteNodeUser_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.DeleteNodeUserAsync("", "", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_ReplaceNodeUser_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new object()); + Response response = client.ReplaceNodeUser("", "", "", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_ReplaceNodeUser_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new object()); + Response response = await client.ReplaceNodeUserAsync("", "", "", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_ReplaceNodeUser_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchNodeUserUpdateContent content = new BatchNodeUserUpdateContent(); + Response response = client.ReplaceNodeUser("", "", "", content); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_ReplaceNodeUser_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchNodeUserUpdateContent content = new BatchNodeUserUpdateContent(); + Response response = await client.ReplaceNodeUserAsync("", "", "", content); + } + + [Test] + 
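+ // The AllParameters variants that follow also exercise the two request options shared by
+ // most operations in this client: timeOutInSeconds bounds how long the service may spend
+ // processing the request, and ocpdate populates the ocp-date request header, which must be
+ // RFC 1123 formatted (e.g. DateTimeOffset.UtcNow.ToString("R")).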
[Ignore("Only validating compilation of examples")] + public void Example_Batch_ReplaceNodeUser_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + password = "", + expiryTime = "2022-05-10T18:57:31.2311892Z", + sshPublicKey = "", + }); + Response response = client.ReplaceNodeUser("", "", "", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_ReplaceNodeUser_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + password = "", + expiryTime = "2022-05-10T18:57:31.2311892Z", + sshPublicKey = "", + }); + Response response = await client.ReplaceNodeUserAsync("", "", "", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_ReplaceNodeUser_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchNodeUserUpdateContent content = new BatchNodeUserUpdateContent + { + Password = "", + ExpiryTime = DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), + SshPublicKey = "", + }; + Response response = client.ReplaceNodeUser("", "", "", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_ReplaceNodeUser_AllParameters_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchNodeUserUpdateContent content = new BatchNodeUserUpdateContent + { + Password = "", + ExpiryTime = DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), + SshPublicKey = "", + }; + Response response = await client.ReplaceNodeUserAsync("", "", "", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetNode_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetNode("", "", null, null, null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetNode_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetNodeAsync("", "", null, null, null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.ToString()); + } + 
+ [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetNode_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response<BatchNode> response = client.GetNode("", ""); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetNode_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response<BatchNode> response = await client.GetNodeAsync("", ""); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetNode_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetNode("", "", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), new string[] { "" }, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("schedulingState").ToString()); + Console.WriteLine(result.GetProperty("stateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("lastBootTime").ToString()); + Console.WriteLine(result.GetProperty("allocationTime").ToString()); + Console.WriteLine(result.GetProperty("ipAddress").ToString()); + Console.WriteLine(result.GetProperty("affinityId").ToString()); + Console.WriteLine(result.GetProperty("vmSize").ToString()); + Console.WriteLine(result.GetProperty("totalTasksRun").ToString()); + Console.WriteLine(result.GetProperty("runningTasksCount").ToString()); + Console.WriteLine(result.GetProperty("runningTaskSlotsCount").ToString()); + Console.WriteLine(result.GetProperty("totalTasksSucceeded").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("taskUrl").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("jobId").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("taskId").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("subtaskId").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("taskState").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("endTime").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("exitCode").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("containerInfo").GetProperty("containerId").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("containerInfo").GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("containerInfo").GetProperty("error").ToString()); + 
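+ // JsonElement.GetProperty throws a KeyNotFoundException when a property is absent, so this
+ // AllParameters sample presumes a fully populated payload; production code should prefer
+ // TryGetProperty for optional fields such as failureInfo below.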
Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("category").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("details")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("details")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("retryCount").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("lastRetryTime").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("requeueCount").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("lastRequeueTime").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("result").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); 
+ Console.WriteLine(result.GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("waitForSuccess").ToString()); + Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("endTime").ToString()); + Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("exitCode").ToString()); + Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("containerInfo").GetProperty("containerId").ToString()); + Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("containerInfo").GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("containerInfo").GetProperty("error").ToString()); + Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("failureInfo").GetProperty("category").ToString()); + Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("failureInfo").GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("failureInfo").GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("failureInfo").GetProperty("details")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("failureInfo").GetProperty("details")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("retryCount").ToString()); + Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("lastRetryTime").ToString()); + Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("result").ToString()); + Console.WriteLine(result.GetProperty("errors")[0].GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("errors")[0].GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("errors")[0].GetProperty("errorDetails")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("errors")[0].GetProperty("errorDetails")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("isDedicated").ToString()); + Console.WriteLine(result.GetProperty("endpointConfiguration").GetProperty("inboundEndpoints")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("endpointConfiguration").GetProperty("inboundEndpoints")[0].GetProperty("protocol").ToString()); + Console.WriteLine(result.GetProperty("endpointConfiguration").GetProperty("inboundEndpoints")[0].GetProperty("publicIPAddress").ToString()); + Console.WriteLine(result.GetProperty("endpointConfiguration").GetProperty("inboundEndpoints")[0].GetProperty("publicFQDN").ToString()); + Console.WriteLine(result.GetProperty("endpointConfiguration").GetProperty("inboundEndpoints")[0].GetProperty("frontendPort").ToString()); + 
Console.WriteLine(result.GetProperty("endpointConfiguration").GetProperty("inboundEndpoints")[0].GetProperty("backendPort").ToString()); + Console.WriteLine(result.GetProperty("nodeAgentInfo").GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("nodeAgentInfo").GetProperty("lastUpdateTime").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineInfo").GetProperty("imageReference").GetProperty("publisher").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineInfo").GetProperty("imageReference").GetProperty("offer").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineInfo").GetProperty("imageReference").GetProperty("sku").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineInfo").GetProperty("imageReference").GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineInfo").GetProperty("imageReference").GetProperty("virtualMachineImageId").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineInfo").GetProperty("imageReference").GetProperty("exactVersion").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineInfo").GetProperty("scaleSetVmResourceId").ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetNode_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetNode("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), select: new string[] { "" }); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_RebootNode_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = null; + Response response = client.RebootNode("", "", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_RebootNode_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = null; + Response response = await client.RebootNodeAsync("", "", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_RebootNode_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.RebootNode("", ""); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_RebootNode_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.RebootNodeAsync("", ""); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_RebootNode_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content 
= RequestContent.Create(new + { + nodeRebootOption = "requeue", + }); + Response response = client.RebootNode("", "", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_RebootNode_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + nodeRebootOption = "requeue", + }); + Response response = await client.RebootNodeAsync("", "", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_RebootNode_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchNodeRebootContent parameters = new BatchNodeRebootContent + { + NodeRebootOption = BatchNodeRebootOption.Requeue, + }; + Response response = client.RebootNode("", "", parameters: parameters, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_RebootNode_AllParameters_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchNodeRebootContent parameters = new BatchNodeRebootContent + { + NodeRebootOption = BatchNodeRebootOption.Requeue, + }; + Response response = await client.RebootNodeAsync("", "", parameters: parameters, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_DisableNodeScheduling_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = null; + Response response = client.DisableNodeScheduling("", "", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_DisableNodeScheduling_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = null; + Response response = await client.DisableNodeSchedulingAsync("", "", content); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_DisableNodeScheduling_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.DisableNodeScheduling("", ""); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_DisableNodeScheduling_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new 
DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.DisableNodeSchedulingAsync("", ""); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_DisableNodeScheduling_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + nodeDisableSchedulingOption = "requeue", + }); + Response response = client.DisableNodeScheduling("", "", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_DisableNodeScheduling_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + nodeDisableSchedulingOption = "requeue", + }); + Response response = await client.DisableNodeSchedulingAsync("", "", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_DisableNodeScheduling_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchNodeDisableSchedulingContent parameters = new BatchNodeDisableSchedulingContent + { + NodeDisableSchedulingOption = BatchNodeDisableSchedulingOption.Requeue, + }; + Response response = client.DisableNodeScheduling("", "", parameters: parameters, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_DisableNodeScheduling_AllParameters_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + BatchNodeDisableSchedulingContent parameters = new BatchNodeDisableSchedulingContent + { + NodeDisableSchedulingOption = BatchNodeDisableSchedulingOption.Requeue, + }; + Response response = await client.DisableNodeSchedulingAsync("", "", parameters: parameters, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_EnableNodeScheduling_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.EnableNodeScheduling("", ""); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_EnableNodeScheduling_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.EnableNodeSchedulingAsync("", ""); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only 
validating compilation of examples")] + public void Example_Batch_EnableNodeScheduling_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.EnableNodeScheduling("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_EnableNodeScheduling_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.EnableNodeSchedulingAsync("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetNodeRemoteLoginSettings_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetNodeRemoteLoginSettings("", "", null, null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.GetProperty("remoteLoginIPAddress").ToString()); + Console.WriteLine(result.GetProperty("remoteLoginPort").ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetNodeRemoteLoginSettings_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetNodeRemoteLoginSettingsAsync("", "", null, null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.GetProperty("remoteLoginIPAddress").ToString()); + Console.WriteLine(result.GetProperty("remoteLoginPort").ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetNodeRemoteLoginSettings_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetNodeRemoteLoginSettings("", ""); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetNodeRemoteLoginSettings_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetNodeRemoteLoginSettingsAsync("", ""); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetNodeRemoteLoginSettings_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetNodeRemoteLoginSettings("", "", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.GetProperty("remoteLoginIPAddress").ToString()); 
+ Console.WriteLine(result.GetProperty("remoteLoginPort").ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetNodeRemoteLoginSettings_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetNodeRemoteLoginSettingsAsync("", "", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.GetProperty("remoteLoginIPAddress").ToString()); + Console.WriteLine(result.GetProperty("remoteLoginPort").ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetNodeRemoteLoginSettings_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetNodeRemoteLoginSettings("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetNodeRemoteLoginSettings_AllParameters_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetNodeRemoteLoginSettingsAsync("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_UploadNodeLogs_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + containerUrl = "", + startTime = "2022-05-10T18:57:31.2311892Z", + }); + Response response = client.UploadNodeLogs("", "", content); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.GetProperty("virtualDirectoryName").ToString()); + Console.WriteLine(result.GetProperty("numberOfFilesUploaded").ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_UploadNodeLogs_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + containerUrl = "", + startTime = "2022-05-10T18:57:31.2311892Z", + }); + Response response = await client.UploadNodeLogsAsync("", "", content); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.GetProperty("virtualDirectoryName").ToString()); + Console.WriteLine(result.GetProperty("numberOfFilesUploaded").ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_UploadNodeLogs_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + UploadBatchServiceLogsContent content = new UploadBatchServiceLogsContent("", 
DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z")); + Response response = client.UploadNodeLogs("", "", content); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_UploadNodeLogs_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + UploadBatchServiceLogsContent content = new UploadBatchServiceLogsContent("", DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z")); + Response response = await client.UploadNodeLogsAsync("", "", content); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_UploadNodeLogs_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + containerUrl = "", + startTime = "2022-05-10T18:57:31.2311892Z", + endTime = "2022-05-10T18:57:31.2311892Z", + identityReference = new + { + resourceId = "", + }, + }); + Response response = client.UploadNodeLogs("", "", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.GetProperty("virtualDirectoryName").ToString()); + Console.WriteLine(result.GetProperty("numberOfFilesUploaded").ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_UploadNodeLogs_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + using RequestContent content = RequestContent.Create(new + { + containerUrl = "", + startTime = "2022-05-10T18:57:31.2311892Z", + endTime = "2022-05-10T18:57:31.2311892Z", + identityReference = new + { + resourceId = "", + }, + }); + Response response = await client.UploadNodeLogsAsync("", "", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.GetProperty("virtualDirectoryName").ToString()); + Console.WriteLine(result.GetProperty("numberOfFilesUploaded").ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_UploadNodeLogs_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + UploadBatchServiceLogsContent content = new UploadBatchServiceLogsContent("", DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z")) + { + EndTime = DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), + IdentityReference = new BatchNodeIdentityReference + { + ResourceId = "", + }, + }; + Response response = client.UploadNodeLogs("", "", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_UploadNodeLogs_AllParameters_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + UploadBatchServiceLogsContent 
content = new UploadBatchServiceLogsContent("", DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z")) + { + EndTime = DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), + IdentityReference = new BatchNodeIdentityReference + { + ResourceId = "", + }, + }; + Response<UploadBatchServiceLogsResult> response = await client.UploadNodeLogsAsync("", "", content, timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT")); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetNodeExtension_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetNodeExtension("", "", "", null, null, null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetNodeExtension_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetNodeExtensionAsync("", "", "", null, null, null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetNodeExtension_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response<BatchNodeVMExtension> response = client.GetNodeExtension("", "", ""); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetNodeExtension_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response<BatchNodeVMExtension> response = await client.GetNodeExtensionAsync("", "", ""); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetNodeExtension_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetNodeExtension("", "", "", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), new string[] { "" }, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.GetProperty("provisioningState").ToString()); + Console.WriteLine(result.GetProperty("vmExtension").GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("vmExtension").GetProperty("publisher").ToString()); + Console.WriteLine(result.GetProperty("vmExtension").GetProperty("type").ToString()); + Console.WriteLine(result.GetProperty("vmExtension").GetProperty("typeHandlerVersion").ToString()); + Console.WriteLine(result.GetProperty("vmExtension").GetProperty("autoUpgradeMinorVersion").ToString()); + Console.WriteLine(result.GetProperty("vmExtension").GetProperty("enableAutomaticUpgrade").ToString()); + Console.WriteLine(result.GetProperty("vmExtension").GetProperty("settings").GetProperty("").ToString()); + 
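+ // vmExtension.settings and vmExtension.protectedSettings are free-form JSON bags, so the
+ // key passed to GetProperty is deployment-specific (left empty in this sample).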
Console.WriteLine(result.GetProperty("vmExtension").GetProperty("protectedSettings").GetProperty("").ToString()); + Console.WriteLine(result.GetProperty("vmExtension").GetProperty("provisionAfterExtensions")[0].ToString()); + Console.WriteLine(result.GetProperty("instanceView").GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("instanceView").GetProperty("statuses")[0].GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("instanceView").GetProperty("statuses")[0].GetProperty("displayStatus").ToString()); + Console.WriteLine(result.GetProperty("instanceView").GetProperty("statuses")[0].GetProperty("level").ToString()); + Console.WriteLine(result.GetProperty("instanceView").GetProperty("statuses")[0].GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("instanceView").GetProperty("statuses")[0].GetProperty("time").ToString()); + Console.WriteLine(result.GetProperty("instanceView").GetProperty("subStatuses")[0].GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("instanceView").GetProperty("subStatuses")[0].GetProperty("displayStatus").ToString()); + Console.WriteLine(result.GetProperty("instanceView").GetProperty("subStatuses")[0].GetProperty("level").ToString()); + Console.WriteLine(result.GetProperty("instanceView").GetProperty("subStatuses")[0].GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("instanceView").GetProperty("subStatuses")[0].GetProperty("time").ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetNodeExtension_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetNodeExtension("", "", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), select: new string[] { "" }); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_DeleteNodeFile_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.DeleteNodeFile("", "", ""); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_DeleteNodeFile_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.DeleteNodeFileAsync("", "", ""); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_DeleteNodeFile_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.DeleteNodeFile("", "", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), recursive: true); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_DeleteNodeFile_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); 
+ + Response response = await client.DeleteNodeFileAsync("", "", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), recursive: true); + + Console.WriteLine(response.Status); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetNodeFile_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetNodeFile("", "", "", null, null, null, null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetNodeFile_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetNodeFileAsync("", "", "", null, null, null, null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetNodeFile_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetNodeFile("", "", ""); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetNodeFile_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetNodeFileAsync("", "", ""); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetNodeFile_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetNodeFile("", "", "", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), "", null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetNodeFile_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetNodeFileAsync("", "", "", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), "", null, null); + + JsonElement result = JsonDocument.Parse(response.ContentStream).RootElement; + Console.WriteLine(result.ToString()); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetNodeFile_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = client.GetNodeFile("", "", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), ocpRange: "", requestConditions: null); + } + + [Test] + [Ignore("Only validating 
compilation of examples")] + public async Task Example_Batch_GetNodeFile_AllParameters_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + Response response = await client.GetNodeFileAsync("", "", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), ocpRange: "", requestConditions: null); + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetApplications_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BinaryData item in client.GetApplications(null, null, null, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("versions")[0].ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetApplications_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BinaryData item in client.GetApplicationsAsync(null, null, null, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("versions")[0].ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetApplications_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BatchApplication item in client.GetApplications()) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetApplications_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BatchApplication item in client.GetApplicationsAsync()) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetApplications_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BinaryData item in client.GetApplications(1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("versions")[0].ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetApplications_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BinaryData item in 
client.GetApplicationsAsync(1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("versions")[0].ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetApplications_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BatchApplication item in client.GetApplications(timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234)) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetApplications_AllParameters_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BatchApplication item in client.GetApplicationsAsync(timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234)) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetPoolUsageMetrics_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BinaryData item in client.GetPoolUsageMetrics(null, null, null, null, null, null, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("poolId").ToString()); + Console.WriteLine(result.GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("endTime").ToString()); + Console.WriteLine(result.GetProperty("vmSize").ToString()); + Console.WriteLine(result.GetProperty("totalCoreHours").ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetPoolUsageMetrics_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BinaryData item in client.GetPoolUsageMetricsAsync(null, null, null, null, null, null, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("poolId").ToString()); + Console.WriteLine(result.GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("endTime").ToString()); + Console.WriteLine(result.GetProperty("vmSize").ToString()); + Console.WriteLine(result.GetProperty("totalCoreHours").ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetPoolUsageMetrics_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BatchPoolUsageMetrics item in client.GetPoolUsageMetrics()) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetPoolUsageMetrics_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential 
= new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BatchPoolUsageMetrics item in client.GetPoolUsageMetricsAsync()) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetPoolUsageMetrics_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BinaryData item in client.GetPoolUsageMetrics(1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), "", null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("poolId").ToString()); + Console.WriteLine(result.GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("endTime").ToString()); + Console.WriteLine(result.GetProperty("vmSize").ToString()); + Console.WriteLine(result.GetProperty("totalCoreHours").ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetPoolUsageMetrics_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BinaryData item in client.GetPoolUsageMetricsAsync(1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), "", null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("poolId").ToString()); + Console.WriteLine(result.GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("endTime").ToString()); + Console.WriteLine(result.GetProperty("vmSize").ToString()); + Console.WriteLine(result.GetProperty("totalCoreHours").ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetPoolUsageMetrics_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BatchPoolUsageMetrics item in client.GetPoolUsageMetrics(timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, starttime: DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), endtime: DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), filter: "")) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetPoolUsageMetrics_AllParameters_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BatchPoolUsageMetrics item in client.GetPoolUsageMetricsAsync(timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, starttime: DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), endtime: DateTimeOffset.Parse("2022-05-10T18:57:31.2311892Z"), filter: "")) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetPools_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new 
DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BinaryData item in client.GetPools(null, null, null, null, null, null, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetPools_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BinaryData item in client.GetPoolsAsync(null, null, null, null, null, null, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetPools_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BatchPool item in client.GetPools()) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetPools_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BatchPool item in client.GetPoolsAsync()) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetPools_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BinaryData item in client.GetPools(1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, "", new string[] { "" }, new string[] { "" }, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("eTag").ToString()); + Console.WriteLine(result.GetProperty("lastModified").ToString()); + Console.WriteLine(result.GetProperty("creationTime").ToString()); + Console.WriteLine(result.GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("stateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("allocationState").ToString()); + Console.WriteLine(result.GetProperty("allocationStateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("vmSize").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("publisher").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("offer").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("sku").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("virtualMachineImageId").ToString()); + 
Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("exactVersion").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("nodeAgentSKUId").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("windowsConfiguration").GetProperty("enableAutomaticUpdates").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("lun").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("caching").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("diskSizeGB").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("storageAccountType").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("licenseType").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("type").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerImageNames")[0].ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("diskEncryptionConfiguration").GetProperty("targets")[0].ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("nodePlacementConfiguration").GetProperty("policy").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("publisher").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("type").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("typeHandlerVersion").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("autoUpgradeMinorVersion").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("enableAutomaticUpgrade").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("settings").GetProperty("").ToString()); + 
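// NOTE (assumption): the empty-string key in GetProperty("") is a generator placeholder
+ // for dictionary-valued properties such as "settings" and "protectedSettings"; substitute a real
+ // key when adapting these samples to live data. +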
Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("protectedSettings").GetProperty("").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("provisionAfterExtensions")[0].ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("ephemeralOSDiskSettings").GetProperty("placement").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("caching").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("diskSizeGB").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("managedDisk").GetProperty("storageAccountType").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("writeAcceleratorEnabled").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("encryptionAtHost").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("securityType").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("uefiSettings").GetProperty("secureBootEnabled").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("uefiSettings").GetProperty("vTpmEnabled").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineConfiguration").GetProperty("serviceArtifactReference").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("resizeTimeout").ToString()); + Console.WriteLine(result.GetProperty("resizeErrors")[0].GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("resizeErrors")[0].GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("resizeErrors")[0].GetProperty("values")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("resizeErrors")[0].GetProperty("values")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("resourceTags").GetProperty("").ToString()); + Console.WriteLine(result.GetProperty("currentDedicatedNodes").ToString()); + Console.WriteLine(result.GetProperty("currentLowPriorityNodes").ToString()); + Console.WriteLine(result.GetProperty("targetDedicatedNodes").ToString()); + Console.WriteLine(result.GetProperty("targetLowPriorityNodes").ToString()); + Console.WriteLine(result.GetProperty("enableAutoScale").ToString()); + Console.WriteLine(result.GetProperty("autoScaleFormula").ToString()); + Console.WriteLine(result.GetProperty("autoScaleEvaluationInterval").ToString()); + Console.WriteLine(result.GetProperty("autoScaleRun").GetProperty("timestamp").ToString()); + Console.WriteLine(result.GetProperty("autoScaleRun").GetProperty("results").ToString()); + Console.WriteLine(result.GetProperty("autoScaleRun").GetProperty("error").GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("autoScaleRun").GetProperty("error").GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("autoScaleRun").GetProperty("error").GetProperty("values")[0].GetProperty("name").ToString()); + 
Console.WriteLine(result.GetProperty("autoScaleRun").GetProperty("error").GetProperty("values")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("enableInterNodeCommunication").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("subnetId").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("dynamicVNetAssignmentScope").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("protocol").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("backendPort").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("frontendPortRangeStart").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("frontendPortRangeEnd").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("priority").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("access").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("sourceAddressPrefix").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("sourcePortRanges")[0].ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("publicIPAddressConfiguration").GetProperty("provision").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("publicIPAddressConfiguration").GetProperty("ipAddressIds")[0].ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("enableAcceleratedNetworking").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + 
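// NOTE: secret fields such as the registry "password" above are typically omitted from
+ // service responses, so real code should prefer JsonElement.TryGetProperty over GetProperty,
+ // which throws a KeyNotFoundException when the property is absent. +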
Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("waitForSuccess").ToString()); + Console.WriteLine(result.GetProperty("applicationPackageReferences")[0].GetProperty("applicationId").ToString()); + Console.WriteLine(result.GetProperty("applicationPackageReferences")[0].GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("taskSlotsPerNode").ToString()); + Console.WriteLine(result.GetProperty("taskSchedulingPolicy").GetProperty("nodeFillType").ToString()); + Console.WriteLine(result.GetProperty("userAccounts")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("userAccounts")[0].GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("userAccounts")[0].GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("uid").ToString()); + Console.WriteLine(result.GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("gid").ToString()); + Console.WriteLine(result.GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("sshPrivateKey").ToString()); + Console.WriteLine(result.GetProperty("userAccounts")[0].GetProperty("windowsUserConfiguration").GetProperty("loginMode").ToString()); + Console.WriteLine(result.GetProperty("metadata")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("metadata")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("url").ToString()); + 
Console.WriteLine(result.GetProperty("stats").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("lastUpdateTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("usageStats").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("usageStats").GetProperty("lastUpdateTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("usageStats").GetProperty("dedicatedCoreTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("lastUpdateTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("avgCPUPercentage").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("avgMemoryGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("peakMemoryGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("avgDiskGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("peakDiskGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("diskReadIOps").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("diskWriteIOps").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("diskReadGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("diskWriteGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("networkReadGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("resourceStats").GetProperty("networkWriteGiB").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("accountName").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("containerName").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("accountKey").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("sasKey").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("blobfuseOptions").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("source").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("relativeMountPath").ToString()); + 
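// NOTE (assumption): each mountConfiguration entry populates exactly one of the
+ // azureBlobFileSystemConfiguration, nfsMountConfiguration, cifsMountConfiguration, or
+ // azureFileShareConfiguration shapes, so only the matching subtree is present on a real pool. +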
Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("mountOptions").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("source").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("mountOptions").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("accountName").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("azureFileUrl").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("accountKey").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("mountOptions").ToString()); + Console.WriteLine(result.GetProperty("identity").GetProperty("type").ToString()); + Console.WriteLine(result.GetProperty("identity").GetProperty("userAssignedIdentities")[0].GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("identity").GetProperty("userAssignedIdentities")[0].GetProperty("clientId").ToString()); + Console.WriteLine(result.GetProperty("identity").GetProperty("userAssignedIdentities")[0].GetProperty("principalId").ToString()); + Console.WriteLine(result.GetProperty("targetNodeCommunicationMode").ToString()); + Console.WriteLine(result.GetProperty("currentNodeCommunicationMode").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("mode").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("disableAutomaticRollback").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("enableAutomaticOSUpgrade").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("useRollingUpgradePolicy").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("osRollingUpgradeDeferral").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("enableCrossZoneUpgrade").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxBatchInstancePercent").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxUnhealthyInstancePercent").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxUnhealthyUpgradedInstancePercent").ToString()); + 
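// NOTE (assumption): duration-typed values in this payload, such as "resizeTimeout" above
+ // and "pauseTimeBetweenBatches" below, are ISO 8601 duration strings (for example "PT5M")
+ // rather than numeric seconds. +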
Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("pauseTimeBetweenBatches").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("prioritizeUnhealthyInstances").ToString()); + Console.WriteLine(result.GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("rollbackFailedInstancesOnPolicyBreach").ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetPools_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BatchPool item in client.GetPools(timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, filter: "", select: new string[] { "" }, expand: new string[] { "" })) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetSupportedImages_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BinaryData item in client.GetSupportedImages(null, null, null, null, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("nodeAgentSKUId").ToString()); + Console.WriteLine(result.GetProperty("imageReference").ToString()); + Console.WriteLine(result.GetProperty("osType").ToString()); + Console.WriteLine(result.GetProperty("verificationType").ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetSupportedImages_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BinaryData item in client.GetSupportedImagesAsync(null, null, null, null, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("nodeAgentSKUId").ToString()); + Console.WriteLine(result.GetProperty("imageReference").ToString()); + Console.WriteLine(result.GetProperty("osType").ToString()); + Console.WriteLine(result.GetProperty("verificationType").ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetSupportedImages_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BatchSupportedImage item in client.GetSupportedImages()) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetSupportedImages_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BatchSupportedImage item in client.GetSupportedImagesAsync()) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetSupportedImages_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BinaryData 
item in client.GetSupportedImages(1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, "", null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("nodeAgentSKUId").ToString()); + Console.WriteLine(result.GetProperty("imageReference").GetProperty("publisher").ToString()); + Console.WriteLine(result.GetProperty("imageReference").GetProperty("offer").ToString()); + Console.WriteLine(result.GetProperty("imageReference").GetProperty("sku").ToString()); + Console.WriteLine(result.GetProperty("imageReference").GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("imageReference").GetProperty("virtualMachineImageId").ToString()); + Console.WriteLine(result.GetProperty("imageReference").GetProperty("exactVersion").ToString()); + Console.WriteLine(result.GetProperty("osType").ToString()); + Console.WriteLine(result.GetProperty("capabilities")[0].ToString()); + Console.WriteLine(result.GetProperty("batchSupportEndOfLife").ToString()); + Console.WriteLine(result.GetProperty("verificationType").ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetSupportedImages_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BinaryData item in client.GetSupportedImagesAsync(1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, "", null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("nodeAgentSKUId").ToString()); + Console.WriteLine(result.GetProperty("imageReference").GetProperty("publisher").ToString()); + Console.WriteLine(result.GetProperty("imageReference").GetProperty("offer").ToString()); + Console.WriteLine(result.GetProperty("imageReference").GetProperty("sku").ToString()); + Console.WriteLine(result.GetProperty("imageReference").GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("imageReference").GetProperty("virtualMachineImageId").ToString()); + Console.WriteLine(result.GetProperty("imageReference").GetProperty("exactVersion").ToString()); + Console.WriteLine(result.GetProperty("osType").ToString()); + Console.WriteLine(result.GetProperty("capabilities")[0].ToString()); + Console.WriteLine(result.GetProperty("batchSupportEndOfLife").ToString()); + Console.WriteLine(result.GetProperty("verificationType").ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetSupportedImages_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BatchSupportedImage item in client.GetSupportedImages(timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, filter: "")) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetSupportedImages_AllParameters_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BatchSupportedImage item in client.GetSupportedImagesAsync(timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 
maxresults: 1234, filter: "")) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetPoolNodeCounts_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BinaryData item in client.GetPoolNodeCounts(null, null, null, null, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("poolId").ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetPoolNodeCounts_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BinaryData item in client.GetPoolNodeCountsAsync(null, null, null, null, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("poolId").ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetPoolNodeCounts_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BatchPoolNodeCounts item in client.GetPoolNodeCounts()) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetPoolNodeCounts_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BatchPoolNodeCounts item in client.GetPoolNodeCountsAsync()) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetPoolNodeCounts_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BinaryData item in client.GetPoolNodeCounts(1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, "", null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("poolId").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("creating").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("idle").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("offline").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("preempted").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("rebooting").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("reimaging").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("running").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("starting").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("startTaskFailed").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("leavingPool").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("unknown").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("unusable").ToString()); + 
Console.WriteLine(result.GetProperty("dedicated").GetProperty("waitingForStartTask").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("total").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("upgradingOS").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("creating").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("idle").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("offline").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("preempted").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("rebooting").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("reimaging").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("running").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("starting").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("startTaskFailed").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("leavingPool").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("unknown").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("unusable").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("waitingForStartTask").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("total").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("upgradingOS").ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetPoolNodeCounts_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BinaryData item in client.GetPoolNodeCountsAsync(1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, "", null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("poolId").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("creating").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("idle").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("offline").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("preempted").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("rebooting").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("reimaging").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("running").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("starting").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("startTaskFailed").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("leavingPool").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("unknown").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("unusable").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("waitingForStartTask").ToString()); + Console.WriteLine(result.GetProperty("dedicated").GetProperty("total").ToString()); + 
Console.WriteLine(result.GetProperty("dedicated").GetProperty("upgradingOS").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("creating").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("idle").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("offline").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("preempted").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("rebooting").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("reimaging").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("running").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("starting").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("startTaskFailed").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("leavingPool").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("unknown").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("unusable").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("waitingForStartTask").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("total").ToString()); + Console.WriteLine(result.GetProperty("lowPriority").GetProperty("upgradingOS").ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetPoolNodeCounts_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BatchPoolNodeCounts item in client.GetPoolNodeCounts(timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, filter: "")) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetPoolNodeCounts_AllParameters_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BatchPoolNodeCounts item in client.GetPoolNodeCountsAsync(timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, filter: "")) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetJobs_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BinaryData item in client.GetJobs(null, null, null, null, null, null, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("poolInfo").ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetJobs_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BinaryData item in client.GetJobsAsync(null, null, null, null, null, null, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("poolInfo").ToString()); + } + } + + [Test] + 
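// NOTE: the *_Convenience samples that follow use the model-based overloads, which
+ // return typed BatchJob items, while the *_ShortVersion and *_AllParameters protocol samples
+ // work with raw BinaryData parsed through JsonDocument. +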
[Ignore("Only validating compilation of examples")] + public void Example_Batch_GetJobs_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BatchJob item in client.GetJobs()) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetJobs_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BatchJob item in client.GetJobsAsync()) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetJobs_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BinaryData item in client.GetJobs(1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, "", new string[] { "" }, new string[] { "" }, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("usesTaskDependencies").ToString()); + Console.WriteLine(result.GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("eTag").ToString()); + Console.WriteLine(result.GetProperty("lastModified").ToString()); + Console.WriteLine(result.GetProperty("creationTime").ToString()); + Console.WriteLine(result.GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("stateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("previousState").ToString()); + Console.WriteLine(result.GetProperty("previousStateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("priority").ToString()); + Console.WriteLine(result.GetProperty("allowTaskPreemption").ToString()); + Console.WriteLine(result.GetProperty("maxParallelTasks").ToString()); + Console.WriteLine(result.GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + 
Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("filePattern").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("path").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("containerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("uploadHeaders")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("uploadHeaders")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("uploadOptions").GetProperty("uploadCondition").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("constraints").GetProperty("retentionTime").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("requiredSlots").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("killJobOnCompletion").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("userIdentity").GetProperty("username").ToString()); + 
Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("runExclusive").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("applicationPackageReferences")[0].GetProperty("applicationId").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("applicationPackageReferences")[0].GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("authenticationTokenSettings").GetProperty("access")[0].ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("allowLowPriorityNode").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + 
Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("constraints").GetProperty("retentionTime").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("waitForSuccess").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("rerunOnNodeRebootAfterSuccess").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + 
Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("retentionTime").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("commonEnvironmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("commonEnvironmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("poolId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("autoPoolIdPrefix").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("poolLifetimeOption").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("keepAlive").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("vmSize").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("publisher").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("offer").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("sku").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("virtualMachineImageId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("exactVersion").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("nodeAgentSKUId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("windowsConfiguration").GetProperty("enableAutomaticUpdates").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("lun").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("caching").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("diskSizeGB").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("storageAccountType").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("licenseType").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("type").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerImageNames")[0].ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("diskEncryptionConfiguration").GetProperty("targets")[0].ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("nodePlacementConfiguration").GetProperty("policy").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("publisher").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("type").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("typeHandlerVersion").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("autoUpgradeMinorVersion").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("enableAutomaticUpgrade").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("settings").GetProperty("").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("protectedSettings").GetProperty("").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("provisionAfterExtensions")[0].ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("ephemeralOSDiskSettings").GetProperty("placement").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("caching").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("diskSizeGB").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("managedDisk").GetProperty("storageAccountType").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("writeAcceleratorEnabled").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("encryptionAtHost").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("securityType").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("uefiSettings").GetProperty("secureBootEnabled").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("uefiSettings").GetProperty("vTpmEnabled").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("serviceArtifactReference").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("taskSlotsPerNode").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("taskSchedulingPolicy").GetProperty("nodeFillType").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("resizeTimeout").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("resourceTags").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("targetDedicatedNodes").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("targetLowPriorityNodes").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("enableAutoScale").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("autoScaleFormula").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("autoScaleEvaluationInterval").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("enableInterNodeCommunication").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("subnetId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("dynamicVNetAssignmentScope").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("protocol").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("backendPort").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("frontendPortRangeStart").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("frontendPortRangeEnd").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("priority").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("access").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("sourceAddressPrefix").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("sourcePortRanges")[0].ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("publicIPAddressConfiguration").GetProperty("provision").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("publicIPAddressConfiguration").GetProperty("ipAddressIds")[0].ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("enableAcceleratedNetworking").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("waitForSuccess").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("applicationPackageReferences")[0].GetProperty("applicationId").ToString()); 
+ Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("applicationPackageReferences")[0].GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("uid").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("gid").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("sshPrivateKey").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("windowsUserConfiguration").GetProperty("loginMode").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("metadata")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("metadata")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("accountName").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("containerName").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("accountKey").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("sasKey").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("blobfuseOptions").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("relativeMountPath").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("source").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("mountOptions").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("source").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("mountOptions").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("accountName").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("azureFileUrl").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("accountKey").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("mountOptions").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("targetNodeCommunicationMode").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("mode").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("disableAutomaticRollback").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("enableAutomaticOSUpgrade").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("useRollingUpgradePolicy").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("osRollingUpgradeDeferral").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("enableCrossZoneUpgrade").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxBatchInstancePercent").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxUnhealthyInstancePercent").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxUnhealthyUpgradedInstancePercent").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("pauseTimeBetweenBatches").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("prioritizeUnhealthyInstances").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("rollbackFailedInstancesOnPolicyBreach").ToString()); + Console.WriteLine(result.GetProperty("onAllTasksComplete").ToString()); + Console.WriteLine(result.GetProperty("onTaskFailure").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("subnetId").ToString()); + Console.WriteLine(result.GetProperty("metadata")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("metadata")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("endTime").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("poolId").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("schedulingError").GetProperty("category").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("schedulingError").GetProperty("code").ToString()); + 
Console.WriteLine(result.GetProperty("executionInfo").GetProperty("schedulingError").GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("schedulingError").GetProperty("details")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("schedulingError").GetProperty("details")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("terminateReason").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("lastUpdateTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("userCPUTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("kernelCPUTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("wallClockTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("readIOps").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("writeIOps").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("readIOGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("writeIOGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("numSucceededTasks").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("numFailedTasks").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("numTaskRetries").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("waitTime").ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetJobs_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BatchJob item in client.GetJobs(timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, filter: "", select: new string[] { "" }, expand: new string[] { "" })) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetJobsFromSchedules_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BinaryData item in client.GetJobsFromSchedules("", null, null, null, null, null, null, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("poolInfo").ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetJobsFromSchedules_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BinaryData item in client.GetJobsFromSchedulesAsync("", null, null, null, null, null, null, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("poolInfo").ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetJobsFromSchedules_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + 
TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BatchJob item in client.GetJobsFromSchedules("")) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetJobsFromSchedules_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BatchJob item in client.GetJobsFromSchedulesAsync("")) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetJobsFromSchedules_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BinaryData item in client.GetJobsFromSchedules("", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, "", new string[] { "" }, new string[] { "" }, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("usesTaskDependencies").ToString()); + Console.WriteLine(result.GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("eTag").ToString()); + Console.WriteLine(result.GetProperty("lastModified").ToString()); + Console.WriteLine(result.GetProperty("creationTime").ToString()); + Console.WriteLine(result.GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("stateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("previousState").ToString()); + Console.WriteLine(result.GetProperty("previousStateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("priority").ToString()); + Console.WriteLine(result.GetProperty("allowTaskPreemption").ToString()); + Console.WriteLine(result.GetProperty("maxParallelTasks").ToString()); + Console.WriteLine(result.GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + 
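+ // The remainder of this dump mirrors the GetJobs "AllParameters" example above: jobs created by a
+ // schedule expose the identical BatchJob property tree.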
Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("filePattern").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("path").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("containerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("uploadHeaders")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("uploadHeaders")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("uploadOptions").GetProperty("uploadCondition").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("constraints").GetProperty("retentionTime").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("requiredSlots").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("killJobOnCompletion").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + 
Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("runExclusive").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("applicationPackageReferences")[0].GetProperty("applicationId").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("applicationPackageReferences")[0].GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("authenticationTokenSettings").GetProperty("access")[0].ToString()); + Console.WriteLine(result.GetProperty("jobManagerTask").GetProperty("allowLowPriorityNode").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("constraints").GetProperty("retentionTime").ToString()); + 
Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("waitForSuccess").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTask").GetProperty("rerunOnNodeRebootAfterSuccess").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("retentionTime").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("userIdentity").GetProperty("username").ToString()); + 
Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("commonEnvironmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("commonEnvironmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("poolId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("autoPoolIdPrefix").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("poolLifetimeOption").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("keepAlive").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("vmSize").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("publisher").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("offer").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("sku").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("virtualMachineImageId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("exactVersion").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("nodeAgentSKUId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("windowsConfiguration").GetProperty("enableAutomaticUpdates").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("lun").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("caching").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("diskSizeGB").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("storageAccountType").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("licenseType").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("type").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerImageNames")[0].ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("diskEncryptionConfiguration").GetProperty("targets")[0].ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("nodePlacementConfiguration").GetProperty("policy").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("publisher").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("type").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("typeHandlerVersion").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("autoUpgradeMinorVersion").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("enableAutomaticUpgrade").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("settings").GetProperty("").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("protectedSettings").GetProperty("").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("provisionAfterExtensions")[0].ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("ephemeralOSDiskSettings").GetProperty("placement").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("caching").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("diskSizeGB").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("managedDisk").GetProperty("storageAccountType").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("writeAcceleratorEnabled").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("encryptionAtHost").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("securityType").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("uefiSettings").GetProperty("secureBootEnabled").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("uefiSettings").GetProperty("vTpmEnabled").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("serviceArtifactReference").GetProperty("id").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("taskSlotsPerNode").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("taskSchedulingPolicy").GetProperty("nodeFillType").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("resizeTimeout").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("resourceTags").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("targetDedicatedNodes").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("targetLowPriorityNodes").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("enableAutoScale").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("autoScaleFormula").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("autoScaleEvaluationInterval").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("enableInterNodeCommunication").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("subnetId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("dynamicVNetAssignmentScope").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("protocol").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("backendPort").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("frontendPortRangeStart").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("frontendPortRangeEnd").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("priority").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("access").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("sourceAddressPrefix").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("sourcePortRanges")[0].ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("publicIPAddressConfiguration").GetProperty("provision").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("publicIPAddressConfiguration").GetProperty("ipAddressIds")[0].ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("enableAcceleratedNetworking").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("waitForSuccess").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("applicationPackageReferences")[0].GetProperty("applicationId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("applicationPackageReferences")[0].GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("password").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("uid").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("gid").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("sshPrivateKey").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("windowsUserConfiguration").GetProperty("loginMode").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("metadata")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("metadata")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("accountName").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("containerName").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("accountKey").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("sasKey").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("blobfuseOptions").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("source").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("relativeMountPath").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("mountOptions").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("source").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("mountOptions").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("accountName").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("azureFileUrl").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("accountKey").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("mountOptions").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("targetNodeCommunicationMode").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("mode").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("disableAutomaticRollback").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("enableAutomaticOSUpgrade").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("useRollingUpgradePolicy").ToString()); + 
Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("osRollingUpgradeDeferral").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("enableCrossZoneUpgrade").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxBatchInstancePercent").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxUnhealthyInstancePercent").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxUnhealthyUpgradedInstancePercent").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("pauseTimeBetweenBatches").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("prioritizeUnhealthyInstances").ToString()); + Console.WriteLine(result.GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("rollbackFailedInstancesOnPolicyBreach").ToString()); + Console.WriteLine(result.GetProperty("onAllTasksComplete").ToString()); + Console.WriteLine(result.GetProperty("onTaskFailure").ToString()); + Console.WriteLine(result.GetProperty("networkConfiguration").GetProperty("subnetId").ToString()); + Console.WriteLine(result.GetProperty("metadata")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("metadata")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("endTime").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("poolId").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("schedulingError").GetProperty("category").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("schedulingError").GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("schedulingError").GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("schedulingError").GetProperty("details")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("schedulingError").GetProperty("details")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("terminateReason").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("lastUpdateTime").ToString()); + 
Console.WriteLine(result.GetProperty("stats").GetProperty("userCPUTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("kernelCPUTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("wallClockTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("readIOps").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("writeIOps").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("readIOGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("writeIOGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("numSucceededTasks").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("numFailedTasks").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("numTaskRetries").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("waitTime").ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetJobsFromSchedules_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BatchJob item in client.GetJobsFromSchedules("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, filter: "", select: new string[] { "" }, expand: new string[] { "" })) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetJobPreparationAndReleaseTaskStatuses_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BinaryData item in client.GetJobPreparationAndReleaseTaskStatuses("", null, null, null, null, null, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetJobPreparationAndReleaseTaskStatuses_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BinaryData item in client.GetJobPreparationAndReleaseTaskStatusesAsync("", null, null, null, null, null, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetJobPreparationAndReleaseTaskStatuses_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BatchJobPreparationAndReleaseTaskStatus item in client.GetJobPreparationAndReleaseTaskStatuses("")) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetJobPreparationAndReleaseTaskStatuses_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BatchJobPreparationAndReleaseTaskStatus item in client.GetJobPreparationAndReleaseTaskStatusesAsync("")) + { + } + 
} + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetJobPreparationAndReleaseTaskStatuses_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BinaryData item in client.GetJobPreparationAndReleaseTaskStatuses("", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, "", new string[] { "" }, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("poolId").ToString()); + Console.WriteLine(result.GetProperty("nodeId").ToString()); + Console.WriteLine(result.GetProperty("nodeUrl").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("endTime").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("taskRootDirectory").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("taskRootDirectoryUrl").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("exitCode").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("containerInfo").GetProperty("containerId").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("containerInfo").GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("containerInfo").GetProperty("error").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("failureInfo").GetProperty("category").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("failureInfo").GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("failureInfo").GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("failureInfo").GetProperty("details")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("failureInfo").GetProperty("details")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("retryCount").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("lastRetryTime").ToString()); + Console.WriteLine(result.GetProperty("jobPreparationTaskExecutionInfo").GetProperty("result").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTaskExecutionInfo").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTaskExecutionInfo").GetProperty("endTime").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTaskExecutionInfo").GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTaskExecutionInfo").GetProperty("taskRootDirectory").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTaskExecutionInfo").GetProperty("taskRootDirectoryUrl").ToString()); + 
Console.WriteLine(result.GetProperty("jobReleaseTaskExecutionInfo").GetProperty("exitCode").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTaskExecutionInfo").GetProperty("containerInfo").GetProperty("containerId").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTaskExecutionInfo").GetProperty("containerInfo").GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTaskExecutionInfo").GetProperty("containerInfo").GetProperty("error").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTaskExecutionInfo").GetProperty("failureInfo").GetProperty("category").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTaskExecutionInfo").GetProperty("failureInfo").GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTaskExecutionInfo").GetProperty("failureInfo").GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTaskExecutionInfo").GetProperty("failureInfo").GetProperty("details")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTaskExecutionInfo").GetProperty("failureInfo").GetProperty("details")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobReleaseTaskExecutionInfo").GetProperty("result").ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetJobPreparationAndReleaseTaskStatuses_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BatchJobPreparationAndReleaseTaskStatus item in client.GetJobPreparationAndReleaseTaskStatuses("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, filter: "", select: new string[] { "" })) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetJobSchedules_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BinaryData item in client.GetJobSchedules(null, null, null, null, null, null, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetJobSchedules_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BinaryData item in client.GetJobSchedulesAsync(null, null, null, null, null, null, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetJobSchedules_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BatchJobSchedule item in client.GetJobSchedules()) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task 
Example_Batch_GetJobSchedules_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BatchJobSchedule item in client.GetJobSchedulesAsync()) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetJobSchedules_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BinaryData item in client.GetJobSchedules(1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, "", new string[] { "" }, new string[] { "" }, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("eTag").ToString()); + Console.WriteLine(result.GetProperty("lastModified").ToString()); + Console.WriteLine(result.GetProperty("creationTime").ToString()); + Console.WriteLine(result.GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("stateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("previousState").ToString()); + Console.WriteLine(result.GetProperty("previousStateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("schedule").GetProperty("doNotRunUntil").ToString()); + Console.WriteLine(result.GetProperty("schedule").GetProperty("doNotRunAfter").ToString()); + Console.WriteLine(result.GetProperty("schedule").GetProperty("startWindow").ToString()); + Console.WriteLine(result.GetProperty("schedule").GetProperty("recurrenceInterval").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("priority").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("allowTaskPreemption").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("maxParallelTasks").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("usesTaskDependencies").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("onAllTasksComplete").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("onTaskFailure").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("networkConfiguration").GetProperty("subnetId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("filePattern").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("path").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("containerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("uploadHeaders")[0].GetProperty("name").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("uploadHeaders")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("outputFiles")[0].GetProperty("uploadOptions").GetProperty("uploadCondition").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("constraints").GetProperty("retentionTime").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("requiredSlots").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("killJobOnCompletion").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("runExclusive").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("applicationPackageReferences")[0].GetProperty("applicationId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("applicationPackageReferences")[0].GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("authenticationTokenSettings").GetProperty("access")[0].ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobManagerTask").GetProperty("allowLowPriorityNode").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("constraints").GetProperty("retentionTime").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("waitForSuccess").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobPreparationTask").GetProperty("rerunOnNodeRebootAfterSuccess").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("maxWallClockTime").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("retentionTime").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("jobReleaseTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("commonEnvironmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("commonEnvironmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("poolId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("autoPoolIdPrefix").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("poolLifetimeOption").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("keepAlive").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("vmSize").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("publisher").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("offer").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("sku").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("virtualMachineImageId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("imageReference").GetProperty("exactVersion").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("nodeAgentSKUId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("windowsConfiguration").GetProperty("enableAutomaticUpdates").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("lun").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("caching").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("diskSizeGB").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("dataDisks")[0].GetProperty("storageAccountType").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("licenseType").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("type").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerImageNames")[0].ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("containerConfiguration").GetProperty("containerRegistries")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("diskEncryptionConfiguration").GetProperty("targets")[0].ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("nodePlacementConfiguration").GetProperty("policy").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("publisher").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("type").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("typeHandlerVersion").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("autoUpgradeMinorVersion").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("enableAutomaticUpgrade").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("settings").GetProperty("").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("protectedSettings").GetProperty("").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("extensions")[0].GetProperty("provisionAfterExtensions")[0].ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("ephemeralOSDiskSettings").GetProperty("placement").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("caching").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("diskSizeGB").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("managedDisk").GetProperty("storageAccountType").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("osDisk").GetProperty("writeAcceleratorEnabled").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("encryptionAtHost").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("securityType").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("uefiSettings").GetProperty("secureBootEnabled").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("securityProfile").GetProperty("uefiSettings").GetProperty("vTpmEnabled").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("virtualMachineConfiguration").GetProperty("serviceArtifactReference").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("taskSlotsPerNode").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("taskSchedulingPolicy").GetProperty("nodeFillType").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("resizeTimeout").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("resourceTags").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("targetDedicatedNodes").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("targetLowPriorityNodes").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("enableAutoScale").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("autoScaleFormula").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("autoScaleEvaluationInterval").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("enableInterNodeCommunication").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("subnetId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("dynamicVNetAssignmentScope").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("protocol").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("backendPort").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("frontendPortRangeStart").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("frontendPortRangeEnd").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("priority").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("access").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("sourceAddressPrefix").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("endpointConfiguration").GetProperty("inboundNATPools")[0].GetProperty("networkSecurityGroupRules")[0].GetProperty("sourcePortRanges")[0].ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("publicIPAddressConfiguration").GetProperty("provision").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("publicIPAddressConfiguration").GetProperty("ipAddressIds")[0].ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("networkConfiguration").GetProperty("enableAcceleratedNetworking").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("startTask").GetProperty("waitForSuccess").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("applicationPackageReferences")[0].GetProperty("applicationId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("applicationPackageReferences")[0].GetProperty("version").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("uid").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("gid").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("linuxUserConfiguration").GetProperty("sshPrivateKey").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("userAccounts")[0].GetProperty("windowsUserConfiguration").GetProperty("loginMode").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("metadata")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("metadata")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("accountName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("containerName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("accountKey").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("sasKey").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("blobfuseOptions").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureBlobFileSystemConfiguration").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("source").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("nfsMountConfiguration").GetProperty("mountOptions").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("source").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("mountOptions").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("cifsMountConfiguration").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("accountName").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("azureFileUrl").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("accountKey").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("relativeMountPath").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("mountConfiguration")[0].GetProperty("azureFileShareConfiguration").GetProperty("mountOptions").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("targetNodeCommunicationMode").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("mode").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("disableAutomaticRollback").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("enableAutomaticOSUpgrade").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("useRollingUpgradePolicy").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("automaticOSUpgradePolicy").GetProperty("osRollingUpgradeDeferral").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("enableCrossZoneUpgrade").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxBatchInstancePercent").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxUnhealthyInstancePercent").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("maxUnhealthyUpgradedInstancePercent").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("pauseTimeBetweenBatches").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("prioritizeUnhealthyInstances").ToString()); + 
Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("poolInfo").GetProperty("autoPoolSpecification").GetProperty("pool").GetProperty("upgradePolicy").GetProperty("rollingUpgradePolicy").GetProperty("rollbackFailedInstancesOnPolicyBreach").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("metadata")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("jobSpecification").GetProperty("metadata")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("nextRunTime").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("recentJob").GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("recentJob").GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("endTime").ToString()); + Console.WriteLine(result.GetProperty("metadata")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("metadata")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("lastUpdateTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("userCPUTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("kernelCPUTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("wallClockTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("readIOps").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("writeIOps").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("readIOGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("writeIOGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("numSucceededTasks").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("numFailedTasks").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("numTaskRetries").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("waitTime").ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetJobSchedules_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BatchJobSchedule item in client.GetJobSchedules(timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, filter: "", select: new string[] { "" }, expand: new string[] { "" })) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetTasks_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BinaryData item in client.GetTasks("", null, null, null, null, null, null, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetTasks_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential 
credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BinaryData item in client.GetTasksAsync("", null, null, null, null, null, null, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetTasks_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BatchTask item in client.GetTasks("")) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetTasks_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BatchTask item in client.GetTasksAsync("")) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetTasks_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BinaryData item in client.GetTasks("", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, "", new string[] { "" }, new string[] { "" }, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("displayName").ToString()); + Console.WriteLine(result.GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("eTag").ToString()); + Console.WriteLine(result.GetProperty("lastModified").ToString()); + Console.WriteLine(result.GetProperty("creationTime").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("exitCodes")[0].GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("exitCodes")[0].GetProperty("exitOptions").GetProperty("jobAction").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("exitCodes")[0].GetProperty("exitOptions").GetProperty("dependencyAction").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("exitCodeRanges")[0].GetProperty("start").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("exitCodeRanges")[0].GetProperty("end").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("exitCodeRanges")[0].GetProperty("exitOptions").GetProperty("jobAction").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("exitCodeRanges")[0].GetProperty("exitOptions").GetProperty("dependencyAction").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("preProcessingError").GetProperty("jobAction").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("preProcessingError").GetProperty("dependencyAction").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("fileUploadError").GetProperty("jobAction").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("fileUploadError").GetProperty("dependencyAction").ToString()); + 
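// exitConditions."default" (next) is the fallback action applied when a task exit matches none of the exitCodes, exitCodeRanges, or error conditions printed above. +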
Console.WriteLine(result.GetProperty("exitConditions").GetProperty("default").GetProperty("jobAction").ToString()); + Console.WriteLine(result.GetProperty("exitConditions").GetProperty("default").GetProperty("dependencyAction").ToString()); + Console.WriteLine(result.GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("stateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("previousState").ToString()); + Console.WriteLine(result.GetProperty("previousStateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("outputFiles")[0].GetProperty("filePattern").ToString()); + Console.WriteLine(result.GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("path").ToString()); + Console.WriteLine(result.GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("containerUrl").ToString()); + Console.WriteLine(result.GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("uploadHeaders")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("outputFiles")[0].GetProperty("destination").GetProperty("container").GetProperty("uploadHeaders")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("outputFiles")[0].GetProperty("uploadOptions").GetProperty("uploadCondition").ToString()); + Console.WriteLine(result.GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("affinityInfo").GetProperty("affinityId").ToString()); + 
Console.WriteLine(result.GetProperty("constraints").GetProperty("maxWallClockTime").ToString()); + Console.WriteLine(result.GetProperty("constraints").GetProperty("retentionTime").ToString()); + Console.WriteLine(result.GetProperty("constraints").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("requiredSlots").ToString()); + Console.WriteLine(result.GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("endTime").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("exitCode").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("containerInfo").GetProperty("containerId").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("containerInfo").GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("containerInfo").GetProperty("error").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("category").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("details")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("details")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("retryCount").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("lastRetryTime").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("requeueCount").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("lastRequeueTime").ToString()); + Console.WriteLine(result.GetProperty("executionInfo").GetProperty("result").ToString()); + Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("affinityId").ToString()); + Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("nodeUrl").ToString()); + Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("poolId").ToString()); + Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("nodeId").ToString()); + Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("taskRootDirectory").ToString()); + Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("taskRootDirectoryUrl").ToString()); + Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("numberOfInstances").ToString()); + Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("coordinationCommandLine").ToString()); + Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("commonResourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("commonResourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + 
Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("commonResourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("commonResourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("commonResourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("commonResourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("multiInstanceSettings").GetProperty("commonResourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("lastUpdateTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("userCPUTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("kernelCPUTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("wallClockTime").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("readIOps").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("writeIOps").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("readIOGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("writeIOGiB").ToString()); + Console.WriteLine(result.GetProperty("stats").GetProperty("waitTime").ToString()); + Console.WriteLine(result.GetProperty("dependsOn").GetProperty("taskIds")[0].ToString()); + Console.WriteLine(result.GetProperty("dependsOn").GetProperty("taskIdRanges")[0].GetProperty("start").ToString()); + Console.WriteLine(result.GetProperty("dependsOn").GetProperty("taskIdRanges")[0].GetProperty("end").ToString()); + Console.WriteLine(result.GetProperty("applicationPackageReferences")[0].GetProperty("applicationId").ToString()); + Console.WriteLine(result.GetProperty("applicationPackageReferences")[0].GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("authenticationTokenSettings").GetProperty("access")[0].ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetTasks_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BatchTask item in client.GetTasks("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, filter: "", select: new string[] { "" }, expand: new string[] { "" })) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetSubTasks_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BinaryData item in client.GetSubTasks("", "", null, null, null, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetSubTasks_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential 
= new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BinaryData item in client.GetSubTasksAsync("", "", null, null, null, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetSubTasks_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BatchSubtask item in client.GetSubTasks("", "")) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetSubTasks_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BatchSubtask item in client.GetSubTasksAsync("", "")) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetSubTasks_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BinaryData item in client.GetSubTasks("", "", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), new string[] { "" }, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("affinityId").ToString()); + Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("nodeUrl").ToString()); + Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("poolId").ToString()); + Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("nodeId").ToString()); + Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("taskRootDirectory").ToString()); + Console.WriteLine(result.GetProperty("nodeInfo").GetProperty("taskRootDirectoryUrl").ToString()); + Console.WriteLine(result.GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("endTime").ToString()); + Console.WriteLine(result.GetProperty("exitCode").ToString()); + Console.WriteLine(result.GetProperty("containerInfo").GetProperty("containerId").ToString()); + Console.WriteLine(result.GetProperty("containerInfo").GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("containerInfo").GetProperty("error").ToString()); + Console.WriteLine(result.GetProperty("failureInfo").GetProperty("category").ToString()); + Console.WriteLine(result.GetProperty("failureInfo").GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("failureInfo").GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("failureInfo").GetProperty("details")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("failureInfo").GetProperty("details")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("stateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("previousState").ToString()); + Console.WriteLine(result.GetProperty("previousStateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("result").ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] 
+ public void Example_Batch_GetSubTasks_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BatchSubtask item in client.GetSubTasks("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), select: new string[] { "" })) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetTaskFiles_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BinaryData item in client.GetTaskFiles("", "", null, null, null, null, null, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetTaskFiles_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BinaryData item in client.GetTaskFilesAsync("", "", null, null, null, null, null, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetTaskFiles_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BatchNodeFile item in client.GetTaskFiles("", "")) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetTaskFiles_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BatchNodeFile item in client.GetTaskFilesAsync("", "")) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetTaskFiles_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BinaryData item in client.GetTaskFiles("", "", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, "", true, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("isDirectory").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("creationTime").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("lastModified").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("contentLength").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("contentType").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("fileMode").ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetTaskFiles_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new 
DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BinaryData item in client.GetTaskFilesAsync("", "", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, "", true, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("isDirectory").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("creationTime").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("lastModified").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("contentLength").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("contentType").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("fileMode").ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetTaskFiles_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BatchNodeFile item in client.GetTaskFiles("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, filter: "", recursive: true)) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetTaskFiles_AllParameters_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BatchNodeFile item in client.GetTaskFilesAsync("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, filter: "", recursive: true)) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetNodes_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BinaryData item in client.GetNodes("", null, null, null, null, null, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetNodes_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BinaryData item in client.GetNodesAsync("", null, null, null, null, null, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetNodes_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BatchNode item in client.GetNodes("")) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetNodes_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential 
credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BatchNode item in client.GetNodesAsync("")) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetNodes_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BinaryData item in client.GetNodes("", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, "", new string[] { "" }, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("id").ToString()); + Console.WriteLine(result.GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("schedulingState").ToString()); + Console.WriteLine(result.GetProperty("stateTransitionTime").ToString()); + Console.WriteLine(result.GetProperty("lastBootTime").ToString()); + Console.WriteLine(result.GetProperty("allocationTime").ToString()); + Console.WriteLine(result.GetProperty("ipAddress").ToString()); + Console.WriteLine(result.GetProperty("affinityId").ToString()); + Console.WriteLine(result.GetProperty("vmSize").ToString()); + Console.WriteLine(result.GetProperty("totalTasksRun").ToString()); + Console.WriteLine(result.GetProperty("runningTasksCount").ToString()); + Console.WriteLine(result.GetProperty("runningTaskSlotsCount").ToString()); + Console.WriteLine(result.GetProperty("totalTasksSucceeded").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("taskUrl").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("jobId").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("taskId").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("subtaskId").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("taskState").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("endTime").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("exitCode").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("containerInfo").GetProperty("containerId").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("containerInfo").GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("containerInfo").GetProperty("error").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("category").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("details")[0].GetProperty("name").ToString()); + 
Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("failureInfo").GetProperty("details")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("retryCount").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("lastRetryTime").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("requeueCount").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("lastRequeueTime").ToString()); + Console.WriteLine(result.GetProperty("recentTasks")[0].GetProperty("executionInfo").GetProperty("result").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("commandLine").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("containerRunOptions").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("imageName").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("password").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("registryServer").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("registry").GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("containerSettings").GetProperty("workingDirectory").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("autoStorageContainerName").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("storageContainerUrl").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("httpUrl").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("blobPrefix").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("filePath").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("fileMode").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("resourceFiles")[0].GetProperty("identityReference").GetProperty("resourceId").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("environmentSettings")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("userIdentity").GetProperty("username").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("scope").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("userIdentity").GetProperty("autoUser").GetProperty("elevationLevel").ToString()); + 
Console.WriteLine(result.GetProperty("startTask").GetProperty("maxTaskRetryCount").ToString()); + Console.WriteLine(result.GetProperty("startTask").GetProperty("waitForSuccess").ToString()); + Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("startTime").ToString()); + Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("endTime").ToString()); + Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("exitCode").ToString()); + Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("containerInfo").GetProperty("containerId").ToString()); + Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("containerInfo").GetProperty("state").ToString()); + Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("containerInfo").GetProperty("error").ToString()); + Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("failureInfo").GetProperty("category").ToString()); + Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("failureInfo").GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("failureInfo").GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("failureInfo").GetProperty("details")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("failureInfo").GetProperty("details")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("retryCount").ToString()); + Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("lastRetryTime").ToString()); + Console.WriteLine(result.GetProperty("startTaskInfo").GetProperty("result").ToString()); + Console.WriteLine(result.GetProperty("errors")[0].GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("errors")[0].GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("errors")[0].GetProperty("errorDetails")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("errors")[0].GetProperty("errorDetails")[0].GetProperty("value").ToString()); + Console.WriteLine(result.GetProperty("isDedicated").ToString()); + Console.WriteLine(result.GetProperty("endpointConfiguration").GetProperty("inboundEndpoints")[0].GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("endpointConfiguration").GetProperty("inboundEndpoints")[0].GetProperty("protocol").ToString()); + Console.WriteLine(result.GetProperty("endpointConfiguration").GetProperty("inboundEndpoints")[0].GetProperty("publicIPAddress").ToString()); + Console.WriteLine(result.GetProperty("endpointConfiguration").GetProperty("inboundEndpoints")[0].GetProperty("publicFQDN").ToString()); + Console.WriteLine(result.GetProperty("endpointConfiguration").GetProperty("inboundEndpoints")[0].GetProperty("frontendPort").ToString()); + Console.WriteLine(result.GetProperty("endpointConfiguration").GetProperty("inboundEndpoints")[0].GetProperty("backendPort").ToString()); + Console.WriteLine(result.GetProperty("nodeAgentInfo").GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("nodeAgentInfo").GetProperty("lastUpdateTime").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineInfo").GetProperty("imageReference").GetProperty("publisher").ToString()); + 
Console.WriteLine(result.GetProperty("virtualMachineInfo").GetProperty("imageReference").GetProperty("offer").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineInfo").GetProperty("imageReference").GetProperty("sku").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineInfo").GetProperty("imageReference").GetProperty("version").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineInfo").GetProperty("imageReference").GetProperty("virtualMachineImageId").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineInfo").GetProperty("imageReference").GetProperty("exactVersion").ToString()); + Console.WriteLine(result.GetProperty("virtualMachineInfo").GetProperty("scaleSetVmResourceId").ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetNodes_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BatchNode item in client.GetNodes("", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, filter: "", select: new string[] { "" })) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetNodeExtensions_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BinaryData item in client.GetNodeExtensions("", "", null, null, null, null, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetNodeExtensions_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BinaryData item in client.GetNodeExtensionsAsync("", "", null, null, null, null, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetNodeExtensions_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BatchNodeVMExtension item in client.GetNodeExtensions("", "")) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetNodeExtensions_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BatchNodeVMExtension item in client.GetNodeExtensionsAsync("", "")) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetNodeExtensions_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BinaryData item in client.GetNodeExtensions("", "", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, new string[] { "" }, null)) + { + JsonElement result = 
JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("provisioningState").ToString()); + Console.WriteLine(result.GetProperty("vmExtension").GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("vmExtension").GetProperty("publisher").ToString()); + Console.WriteLine(result.GetProperty("vmExtension").GetProperty("type").ToString()); + Console.WriteLine(result.GetProperty("vmExtension").GetProperty("typeHandlerVersion").ToString()); + Console.WriteLine(result.GetProperty("vmExtension").GetProperty("autoUpgradeMinorVersion").ToString()); + Console.WriteLine(result.GetProperty("vmExtension").GetProperty("enableAutomaticUpgrade").ToString()); + Console.WriteLine(result.GetProperty("vmExtension").GetProperty("settings").GetProperty("").ToString()); + Console.WriteLine(result.GetProperty("vmExtension").GetProperty("protectedSettings").GetProperty("").ToString()); + Console.WriteLine(result.GetProperty("vmExtension").GetProperty("provisionAfterExtensions")[0].ToString()); + Console.WriteLine(result.GetProperty("instanceView").GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("instanceView").GetProperty("statuses")[0].GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("instanceView").GetProperty("statuses")[0].GetProperty("displayStatus").ToString()); + Console.WriteLine(result.GetProperty("instanceView").GetProperty("statuses")[0].GetProperty("level").ToString()); + Console.WriteLine(result.GetProperty("instanceView").GetProperty("statuses")[0].GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("instanceView").GetProperty("statuses")[0].GetProperty("time").ToString()); + Console.WriteLine(result.GetProperty("instanceView").GetProperty("subStatuses")[0].GetProperty("code").ToString()); + Console.WriteLine(result.GetProperty("instanceView").GetProperty("subStatuses")[0].GetProperty("displayStatus").ToString()); + Console.WriteLine(result.GetProperty("instanceView").GetProperty("subStatuses")[0].GetProperty("level").ToString()); + Console.WriteLine(result.GetProperty("instanceView").GetProperty("subStatuses")[0].GetProperty("message").ToString()); + Console.WriteLine(result.GetProperty("instanceView").GetProperty("subStatuses")[0].GetProperty("time").ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetNodeExtensions_AllParameters_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BatchNodeVMExtension item in client.GetNodeExtensions("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, select: new string[] { "" })) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetNodeFiles_ShortVersion() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BinaryData item in client.GetNodeFiles("", "", null, null, null, null, null, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetNodeFiles_ShortVersion_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new 
DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BinaryData item in client.GetNodeFilesAsync("", "", null, null, null, null, null, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetNodeFiles_ShortVersion_Convenience() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BatchNodeFile item in client.GetNodeFiles("", "")) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetNodeFiles_ShortVersion_Convenience_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BatchNodeFile item in client.GetNodeFilesAsync("", "")) + { + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetNodeFiles_AllParameters() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + foreach (BinaryData item in client.GetNodeFiles("", "", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, "", true, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("isDirectory").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("creationTime").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("lastModified").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("contentLength").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("contentType").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("fileMode").ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public async Task Example_Batch_GetNodeFiles_AllParameters_Async() + { + Uri endpoint = new Uri(""); + TokenCredential credential = new DefaultAzureCredential(); + BatchClient client = new BatchClient(endpoint, credential); + + await foreach (BinaryData item in client.GetNodeFilesAsync("", "", 1234, DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), 1234, "", true, null)) + { + JsonElement result = JsonDocument.Parse(item.ToStream()).RootElement; + Console.WriteLine(result.GetProperty("name").ToString()); + Console.WriteLine(result.GetProperty("url").ToString()); + Console.WriteLine(result.GetProperty("isDirectory").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("creationTime").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("lastModified").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("contentLength").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("contentType").ToString()); + Console.WriteLine(result.GetProperty("properties").GetProperty("fileMode").ToString()); + } + } + + [Test] + [Ignore("Only validating compilation of examples")] + public void Example_Batch_GetNodeFiles_AllParameters_Convenience() + { 
+            Uri endpoint = new Uri("");
+            TokenCredential credential = new DefaultAzureCredential();
+            BatchClient client = new BatchClient(endpoint, credential);
+
+            foreach (BatchNodeFile item in client.GetNodeFiles("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, filter: "", recursive: true))
+            {
+            }
+        }
+
+        [Test]
+        [Ignore("Only validating compilation of examples")]
+        public async Task Example_Batch_GetNodeFiles_AllParameters_Convenience_Async()
+        {
+            Uri endpoint = new Uri("");
+            TokenCredential credential = new DefaultAzureCredential();
+            BatchClient client = new BatchClient(endpoint, credential);
+
+            await foreach (BatchNodeFile item in client.GetNodeFilesAsync("", "", timeOutInSeconds: 1234, ocpdate: DateTimeOffset.Parse("Tue, 10 May 2022 18:57:31 GMT"), maxresults: 1234, filter: "", recursive: true))
+            {
+            }
+        }
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/tests/Infrastructure/BatchClientBehavior.cs b/sdk/batch/Azure.Compute.Batch/tests/Infrastructure/BatchClientBehavior.cs
new file mode 100644
index 0000000000000..c53355b1bbc29
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/tests/Infrastructure/BatchClientBehavior.cs
@@ -0,0 +1,18 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace Azure.Compute.Batch.Tests.Infrastructure
+{
+    /// <summary>
+    /// Derived classes modify operational behaviors of an Azure Batch service client. Derived classes can be called
+    /// out of order and simultaneously by several threads, so implementations should be thread-safe.
+    /// </summary>
+    public abstract class BatchClientBehavior
+    {
+    }
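+
+    // Illustrative sketch only (not part of the committed API): a concrete behavior
+    // would derive from BatchClientBehavior; the member shown here is hypothetical,
+    // since the abstract base currently defines no members.
+    //
+    // internal class RequestLoggingBehavior : BatchClientBehavior
+    // {
+    //     public void OnOperation(string operationName) =>
+    //         Console.WriteLine($"Batch operation: {operationName}");
+    // }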
+}
diff --git a/sdk/batch/Azure.Compute.Batch/tests/Infrastructure/BatchLiveTestBase.cs b/sdk/batch/Azure.Compute.Batch/tests/Infrastructure/BatchLiveTestBase.cs
new file mode 100644
index 0000000000000..097a1ee552cef
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/tests/Infrastructure/BatchLiveTestBase.cs
@@ -0,0 +1,117 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.CompilerServices;
+using System.Text;
+using System.Text.RegularExpressions;
+using System.Threading.Tasks;
+using Azure.Core;
+using Azure.Core.TestFramework;
+using Azure.Identity;
+using Azure.Compute.Batch;
+
+namespace Azure.Compute.Batch.Tests.Infrastructure
+{
+    public class BatchLiveTestBase : RecordedTestBase<BatchLiveTestEnvironment>
+    {
+        public enum TestAuthMethods
+        {
+            Default,
+            ClientSecret,
+            NamedKey
+        };
+
+        public BatchLiveTestBase(bool isAsync, RecordedTestMode? mode = null) : base(isAsync, mode)
+        {
+            SanitizedHeaders.Add("client-request-id");
+            UseDefaultGuidFormatForClientRequestId = true;
+        }
+
+        public BatchLiveTestBase(bool isAsync) : base(isAsync)
+        {
+            SanitizedHeaders.Add("client-request-id");
+            UseDefaultGuidFormatForClientRequestId = true;
+        }
+
+        public bool isPlayBack()
+        {
+            return this.Mode == RecordedTestMode.Playback;
+        }
+
+        /// <summary>
+        /// Creates a <see cref="BatchClient"/> with the endpoint and API key provided via environment
+        /// variables and instruments it to make use of the Azure Core Test Framework functionalities.
+        /// </summary>
+        /// <param name="testAuthMethod">The authentication method to use; <see cref="TestAuthMethods.ClientSecret"/> (token-based credentials from the test environment) is used by default.</param>
+        /// <param name="skipInstrumenting">Whether or not instrumenting should be skipped. Avoid skipping it as much as possible.</param>
+        /// <returns>The instrumented <see cref="BatchClient"/>.</returns>
+        public BatchClient CreateBatchClient(TestAuthMethods testAuthMethod = TestAuthMethods.ClientSecret, bool skipInstrumenting = false)
+        {
+            var options = InstrumentClientOptions(new BatchClientOptions());
+            BatchClient client;
+            Uri uri = new Uri("https://" + TestEnvironment.BatchAccountURI);
+
+            switch (testAuthMethod)
+            {
+                case TestAuthMethods.ClientSecret:
+                    {
+                        client = new BatchClient(uri, TestEnvironment.Credential, options);
+                    }
+                    break;
+                case TestAuthMethods.NamedKey:
+                    {
+                        var credential = new AzureNamedKeyCredential(TestEnvironment.BatchAccountName, TestEnvironment.BatchAccountKey);
+                        client = new BatchClient(uri, credential, options);
+                    }
+                    break;
+                default:
+                    {
+                        var credential = new DefaultAzureCredential();
+                        client = new BatchClient(uri, credential, options);
+                    }
+                    break;
+            }
+            return skipInstrumenting ? client : InstrumentClient(client);
+        }
+
+        /// <summary>
+        /// Polls all the tasks in the given job and waits for them to reach the completed state.
+        /// </summary>
+        /// <param name="client">The <see cref="BatchClient"/> to query with.</param>
+        /// <param name="jobId">The ID of the job to poll.</param>
+        /// <param name="isPlayBackMode">Whether the test is running in playback mode, in which case the delay between polls is skipped.</param>
+        /// <returns>A task that will complete when all Batch tasks have completed.</returns>
+        /// <exception cref="TimeoutException">Thrown if all tasks haven't reached the completed state after a certain period of time.</exception>
+        public async Task waitForTasksToComplete(BatchClient client, string jobId, bool isPlayBackMode = false)
+        {
+            // Note that this timeout should take into account the time it takes for the pool to scale up
+            var timeoutAfter = DateTime.Now.AddMinutes(10);
+            while (DateTime.Now < timeoutAfter)
+            {
+                var allComplete = true;
+                var tasks = client.GetTasksAsync(jobId, select: new string[] { "id", "state" });
+                await foreach (BatchTask task in tasks)
+                {
+                    if (task.State != BatchTaskState.Completed)
+                    {
+                        allComplete = false;
+                        break;
+                    }
+                }
+
+                if (allComplete)
+                {
+                    return;
+                }
+
+                if (isPlayBackMode == false)
+                {
+                    await Task.Delay(10000);
+                }
+            }
+
+            throw new TimeoutException("Task(s) did not complete within the specified time");
+        }
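+
+        // Illustrative usage sketch (hypothetical job ID): a derived recorded test would
+        // typically create a client, submit work, and then poll until all tasks finish.
+        //
+        // var client = CreateBatchClient();
+        // await client.CreateTaskAsync("jobId", new BatchTaskCreateContent("task1", "cmd /c echo Hello"));
+        // await waitForTasksToComplete(client, "jobId", isPlayBack());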
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/tests/Infrastructure/BatchLiveTestEnvironment.cs b/sdk/batch/Azure.Compute.Batch/tests/Infrastructure/BatchLiveTestEnvironment.cs
new file mode 100644
index 0000000000000..40a6cbb2b9e91
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/tests/Infrastructure/BatchLiveTestEnvironment.cs
@@ -0,0 +1,21 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+using Azure.Core.TestFramework;
+
+namespace Azure.Compute.Batch.Tests.Infrastructure
+{
+    public class BatchLiveTestEnvironment : TestEnvironment
+    {
+        public string BatchAccountName => GetRecordedVariable("batch_account_name");
+
+        public string BatchAccountURI => GetRecordedVariable("batch_account_uri");
+
+        public string BatchAccountKey => GetRecordedVariable("batch_account_key");
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/tests/Infrastructure/DetailLevel.cs b/sdk/batch/Azure.Compute.Batch/tests/Infrastructure/DetailLevel.cs
new file mode 100644
index 0000000000000..7af86e4f891bf
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/tests/Infrastructure/DetailLevel.cs
@@ -0,0 +1,21 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace Azure.Compute.Batch.Tests.Infrastructure
+{
+    /// <summary>
+    /// Controls the amount of detail requested from the Azure Batch service when listing or
+    /// retrieving resources.
+    /// </summary>
+    /// <remarks>The only supported implementation of DetailLevel is ODATADetailLevel.
+    /// Other implementations are ignored.</remarks>
+    public abstract class DetailLevel
+    {
+    }
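+
+    // Illustrative sketch only: a hypothetical concrete detail level that carries an
+    // OData-style $select clause. No concrete implementation ships in this test
+    // infrastructure yet.
+    //
+    // internal class SelectClauseDetailLevel : DetailLevel
+    // {
+    //     public string SelectClause { get; set; }
+    // }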
+}
diff --git a/sdk/batch/Azure.Compute.Batch/tests/Infrastructure/IRefreshable.cs b/sdk/batch/Azure.Compute.Batch/tests/Infrastructure/IRefreshable.cs
new file mode 100644
index 0000000000000..ac4c420a8ebdf
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/tests/Infrastructure/IRefreshable.cs
@@ -0,0 +1,34 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading;
+using System.Threading.Tasks;
+
+namespace Azure.Compute.Batch.Tests.Infrastructure
+{
+    /// <summary>
+    /// Provides a mechanism for refreshing a resource.
+    /// </summary>
+    public interface IRefreshable
+    {
+        /// <summary>
+        /// Begins an asynchronous call to refresh the current object.
+        /// </summary>
+        /// <param name="detailLevel">Controls the detail level of the data returned by a call to the Azure Batch service. If a detail level which omits the "Name" property is specified, refresh will fail.</param>
+        /// <param name="additionalBehaviors">A collection of BatchClientBehavior instances that are applied after the CustomBehaviors on the current object.</param>
+        /// <param name="cancellationToken">A <see cref="CancellationToken"/> for controlling the lifetime of the asynchronous operation.</param>
+        /// <returns>A <see cref="System.Threading.Tasks.Task"/> object that represents the asynchronous operation.</returns>
+        System.Threading.Tasks.Task RefreshAsync(DetailLevel detailLevel = null, IEnumerable<BatchClientBehavior> additionalBehaviors = null, CancellationToken cancellationToken = default(CancellationToken));
+
+        /// <summary>
+        /// Blocking call to force a refresh of the current object.
+        /// </summary>
+        /// <param name="detailLevel">Controls the detail level of the data returned by a call to the Azure Batch service. If a detail level which omits the "Name" property is specified, refresh will fail.</param>
+        /// <param name="additionalBehaviors">A collection of BatchClientBehavior instances that are applied after the CustomBehaviors on the current object.</param>
+        void Refresh(DetailLevel detailLevel = null, IEnumerable<BatchClientBehavior> additionalBehaviors = null);
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/tests/Infrastructure/IaasLinuxPoolFixture.cs b/sdk/batch/Azure.Compute.Batch/tests/Infrastructure/IaasLinuxPoolFixture.cs
new file mode 100644
index 0000000000000..00e769d3fd8ed
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/tests/Infrastructure/IaasLinuxPoolFixture.cs
@@ -0,0 +1,69 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.Diagnostics;
+using System.Linq;
+using System.Runtime.CompilerServices;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace Azure.Compute.Batch.Tests.Infrastructure
+{
+    internal class IaasLinuxPoolFixture : PoolFixture
+    {
+        public IaasLinuxPoolFixture(BatchClient batchClient, string poolID, bool isPlayback) : base(poolID, batchClient, isPlayback) { }
+
+        public async Task<BatchPool> CreatePoolAsync(int targetDedicatedNodes = 1)
+        {
+            BatchPool currentPool = await FindPoolIfExistsAsync();
+
+            if (currentPool == null)
+            {
+                BatchPoolCreateContent batchPoolCreateOptions = CreatePoolOptions(targetDedicatedNodes);
+                await client.CreatePoolAsync(batchPoolCreateOptions);
+            }
+
+            return await WaitForPoolAllocation(client, PoolId);
+        }
+
+        public BatchPoolCreateContent CreatePoolOptions(int? targetDedicatedNodes = null)
+        {
+            // Create a new pool. Note: despite the fixture name, this currently provisions
+            // a Windows Server image with the Windows node agent SKU.
+            ImageReference imageReference = new ImageReference()
+            {
+                Publisher = "MicrosoftWindowsServer",
+                Offer = "WindowsServer",
+                Sku = "2019-datacenter-smalldisk",
+                Version = "latest"
+            };
+            VirtualMachineConfiguration virtualMachineConfiguration = new VirtualMachineConfiguration(imageReference, "batch.node.windows amd64");
+
+            BatchPoolCreateContent batchPoolCreateOptions = new BatchPoolCreateContent(
+                PoolId,
+                VMSize)
+            {
+                VirtualMachineConfiguration = virtualMachineConfiguration,
+                TargetDedicatedNodes = targetDedicatedNodes,
+            };
+            return batchPoolCreateOptions;
+        }
+
+        internal async void DeletePool()
+        {
+            try
+            {
+                await client.DeletePoolAsync(PoolId);
+                await WaitForPoolDeletion(client, PoolId);
+            }
+            catch (Exception)
+            {
+                // Deletion is best-effort cleanup, so swallow any errors here.
+            }
+        }
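+
+        // Illustrative usage sketch (hypothetical pool ID): tests construct the fixture,
+        // call CreatePoolAsync to create the pool and wait for allocation, and delete
+        // the pool when finished.
+        //
+        // var fixture = new IaasLinuxPoolFixture(client, "MyTestPool", isPlayback: false);
+        // BatchPool pool = await fixture.CreatePoolAsync(targetDedicatedNodes: 1);
+        // await client.DeletePoolAsync(fixture.PoolId);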
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/tests/Infrastructure/PoolFixture.cs b/sdk/batch/Azure.Compute.Batch/tests/Infrastructure/PoolFixture.cs
new file mode 100644
index 0000000000000..7b1f97e45f182
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/tests/Infrastructure/PoolFixture.cs
@@ -0,0 +1,119 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading;
+using System.Threading.Tasks;
+using Azure.Core.TestFramework;
+using Azure.Compute.Batch;
+
+namespace Azure.Compute.Batch.Tests.Infrastructure
+{
+    public class PoolFixture
+    {
+        public const string OSFamily = "4";
+        public const string VMSize = "STANDARD_D1_v2";
+        public const string AdminUserAccountName = "BatchTestAdmin";
+        public const string NonAdminUserAccountName = "BatchTestNonAdmin";
+
+        public BatchPool Pool { get; protected set; }
+
+        public string PoolId { get; private set; }
+
+        public bool PlayBack { get; private set; }
+
+        protected readonly BatchClient client;
+
+        protected PoolFixture(string poolId, BatchClient batchClient, bool isPlayback)
+        {
+            PoolId = poolId;
+            client = batchClient;
+            PlayBack = isPlayback;
+        }
+
+        public async void Dispose()
+        {
+            // Disposal should not throw, so swallow any exceptions from cleanup.
+            try
+            {
+                await this.client.DeletePoolAsync(PoolId);
+            }
+            catch (Exception)
+            {
+            }
+        }
+
+        public async Task<BatchPool> FindPoolIfExistsAsync()
+        {
+            // reuse existing pool if it exists
+            AsyncPageable<BatchPool> batchPools = client.GetPoolsAsync();
+
+            await foreach (BatchPool curPool in batchPools)
+            {
+                if (curPool.Id.Equals(PoolId))
+                {
+                    return curPool;
+                }
+            }
+
+            return null;
+        }
+
+        public async Task<BatchPool> WaitForPoolAllocation(BatchClient client, string poolId)
+        {
+            BatchPool thePool = await client.GetPoolAsync(poolId);
+
+            //Wait for pool to be in a usable state
+            //TODO: Use a Utilities waiter
+            TimeSpan computeNodeAllocationTimeout = TimeSpan.FromMinutes(10);
+            await TestUtilities.WaitForPoolToReachStateAsync(client, poolId, AllocationState.Steady, computeNodeAllocationTimeout, PlayBack);
+
+            //Wait for the compute nodes in the pool to be in a usable state
+            //TODO: Use a Utilities waiter
+            TimeSpan computeNodeSteadyTimeout = TimeSpan.FromMinutes(25);
+            DateTime allocationWaitStartTime = DateTime.UtcNow;
+            DateTime timeoutAfterThisTimeUtc = allocationWaitStartTime.Add(computeNodeSteadyTimeout);
+
+            List<BatchNode> computeNodes = await client.GetNodesAsync(poolId).ToEnumerableAsync();
+
+            while (computeNodes.Any(computeNode => computeNode.State != BatchNodeState.Idle))
+            {
+                if (!PlayBack)
+                {
+                    Thread.Sleep(TimeSpan.FromSeconds(10));
+                }
+
+                computeNodes = await client.GetNodesAsync(poolId).ToEnumerableAsync();
+                if (DateTime.UtcNow > timeoutAfterThisTimeUtc)
+                {
+                    throw new Exception("CreatePool: Timed out waiting for compute nodes in pool to reach idle state. Timeout: " + computeNodeSteadyTimeout.ToString());
+                }
+            }
+
+            return thePool;
+        }
+
+        public async Task WaitForPoolDeletion(BatchClient client, string poolId)
+        {
+            BatchPool thePool = await client.GetPoolAsync(poolId);
+
+            TimeSpan deletionOperationTimeout = TimeSpan.FromMinutes(10);
+            DateTime allocationWaitStartTime = DateTime.UtcNow;
+            DateTime timeoutAfterThisTimeUtc = allocationWaitStartTime.Add(deletionOperationTimeout);
+
+            while (thePool != null)
+            {
+                if (!PlayBack)
+                {
+                    Thread.Sleep(TimeSpan.FromSeconds(10));
+                }
+
+                try
+                {
+                    thePool = await client.GetPoolAsync(poolId);
+                }
+                catch (RequestFailedException ex) when (ex.Status == 404)
+                {
+                    // The pool no longer exists, so the delete has completed.
+                    thePool = null;
+                }
+
+                if (DateTime.UtcNow > timeoutAfterThisTimeUtc)
+                {
+                    throw new Exception("DeletePool: Timed out waiting for pool to delete");
+                }
+            }
+        }
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/tests/Infrastructure/PoolUtilites.cs b/sdk/batch/Azure.Compute.Batch/tests/Infrastructure/PoolUtilites.cs
new file mode 100644
index 0000000000000..4b1289fba2ac6
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/tests/Infrastructure/PoolUtilites.cs
@@ -0,0 +1,15 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace Azure.Compute.Batch.Tests.Infrastructure
+{
+    internal class PoolUtilites
+    {
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/tests/Infrastructure/TestUtilities.cs b/sdk/batch/Azure.Compute.Batch/tests/Infrastructure/TestUtilities.cs
new file mode 100644
index 0000000000000..0a1cc540d6a60
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/tests/Infrastructure/TestUtilities.cs
@@ -0,0 +1,108 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.CompilerServices;
+using System.Text;
+using System.Text.RegularExpressions;
+using System.Threading;
+using System.Threading.Tasks;
+
+namespace Azure.Compute.Batch.Tests.Infrastructure
+{
+    public static class TestUtilities
+    {
+        #region Wait helpers
+
+        public static async Task WaitForPoolToReachStateAsync(BatchClient client, string poolId, AllocationState targetAllocationState, TimeSpan timeout, bool isPlayback)
+        {
+            DateTime allocationWaitStartTime = DateTime.UtcNow;
+            DateTime timeoutAfterThisTimeUtc = allocationWaitStartTime.Add(timeout);
+
+            BatchPool pool = await client.GetPoolAsync(poolId);
+
+            while (pool.AllocationState != targetAllocationState)
+            {
+                if (!isPlayback)
+                {
+                    await Task.Delay(TimeSpan.FromSeconds(10)).ConfigureAwait(continueOnCapturedContext: false);
+                }
+
+                pool = await client.GetPoolAsync(poolId);
+
+                if (DateTime.UtcNow > timeoutAfterThisTimeUtc)
+                {
+                    throw new Exception("WaitForPoolToReachStateAsync: Timed out waiting for pool to reach allocation state " + targetAllocationState + ".");
+                }
+            }
+        }
+
+        /// <summary>
+        /// Polls the given condition until it returns true. Will throw if the timeout is exceeded.
+        /// </summary>
+        /// <param name="condition">An asynchronous predicate that is re-evaluated every ten seconds.</param>
+        /// <param name="timeout">The maximum amount of time to wait for the condition to become true.</param>
+        /// <returns>A task that completes when the condition is met.</returns>
+        /// <exception cref="Exception">Thrown if the condition is not met before the timeout.</exception>
+        public static async Task RefreshBasedPollingWithTimeoutAsync(Func<Task<bool>> condition, TimeSpan timeout)
+        {
+            DateTime allocationWaitStartTime = DateTime.UtcNow;
+            DateTime timeoutAfterThisTimeUtc = allocationWaitStartTime.Add(timeout);
+
+            while (!(await condition().ConfigureAwait(continueOnCapturedContext: false)))
+            {
+                await Task.Delay(TimeSpan.FromSeconds(10)).ConfigureAwait(continueOnCapturedContext: false);
+
+                if (DateTime.UtcNow > timeoutAfterThisTimeUtc)
+                {
+                    throw new Exception("RefreshBasedPollingWithTimeout: Timed out waiting for condition to be met.");
+                }
+            }
+        }
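+
+        // Illustrative usage sketch (hypothetical job ID; the BatchJobState member name is
+        // assumed): poll with a lambda until a job completes, or throw after two minutes.
+        //
+        // await TestUtilities.RefreshBasedPollingWithTimeoutAsync(
+        //     condition: async () => (await client.GetJobAsync("jobId")).Value.State == BatchJobState.Completed,
+        //     timeout: TimeSpan.FromMinutes(2));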
+        #endregion
+
+        #region Naming helpers
+
+        public static string GenerateResourceId(
+            string baseId = null,
+            int? maxLength = null,
+            [CallerMemberName] string caller = null)
+        {
+            int actualMaxLength = maxLength ?? 50;
+
+            var guid = Guid.NewGuid().ToString("N");
+            if (baseId == null && caller == null)
+            {
+                return guid;
+            }
+            else
+            {
+                const int minRandomCharacters = 10;
+                // make the ID only contain alphanumeric or underscore or dash:
+                var id = baseId ?? caller;
+                var safeBaseId = Regex.Replace(id, "[^A-Za-z0-9_-]", "");
+                safeBaseId = safeBaseId.Length > actualMaxLength - minRandomCharacters ? safeBaseId.Substring(0, actualMaxLength - minRandomCharacters) : safeBaseId;
+                var result = $"{safeBaseId}_{guid}";
+                return result.Length > actualMaxLength ? result.Substring(0, actualMaxLength) : result;
+            }
+        }
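+
+        // Illustrative outputs (random suffixes vary per run): with no arguments the ID is
+        // a bare 32-character GUID; with a base ID the result is sanitized, truncated, and
+        // suffixed with the GUID up to the maximum length.
+        //
+        // GenerateResourceId();                 // e.g. "3f2c9bb1c59f4e0f9b7c2f3d8f6a1e42"
+        // GenerateResourceId("My Pool!", 20);   // e.g. "MyPool_3f2c9bb1c59f" (at most 20 chars)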
+
+        public static string GetMyName()
+        {
+            string userName = Environment.GetEnvironmentVariable("USERNAME");
+
+            return userName;
+        }
+
+        public static string GetTimeStamp()
+        {
+            return DateTime.UtcNow.ToString("yyyy-MM-dd_hh-mm-ss");
+        }
+
+        public static string GenerateRandomPassword()
+        {
+            return Guid.NewGuid().ToString();
+        }
+        #endregion
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/tests/Integration/BatchApplicationsIntegrationTests.cs b/sdk/batch/Azure.Compute.Batch/tests/Integration/BatchApplicationsIntegrationTests.cs
new file mode 100644
index 0000000000000..077cd4f6f4e44
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/tests/Integration/BatchApplicationsIntegrationTests.cs
@@ -0,0 +1,52 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.Globalization;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+using Azure.Compute.Batch.Tests.Infrastructure;
+using Azure.Core.TestFramework;
+using NUnit.Framework;
+
+namespace Azure.Compute.Batch.Tests.Integration
+{
+    internal class BatchApplicationsIntegrationTests : BatchLiveTestBase
+    {
+        /// <summary>
+        /// Initializes a new instance of the <see cref="BatchApplicationsIntegrationTests"/> class.
+        /// </summary>
+        /// <param name="isAsync">A flag used by the Azure Core Test Framework to differentiate between tests for asynchronous and synchronous methods.</param>
+        public BatchApplicationsIntegrationTests(bool isAsync, RecordedTestMode? mode = null) : base(isAsync, mode)
+        {
+        }
+
+        /// <summary>
+        /// Initializes a new instance of the <see cref="BatchApplicationsIntegrationTests"/> class.
+        /// </summary>
+        /// <param name="isAsync">A flag used by the Azure Core Test Framework to differentiate between tests for asynchronous and synchronous methods.</param>
+        public BatchApplicationsIntegrationTests(bool isAsync) : base(isAsync)
+        {
+        }
+
+        [RecordedTest]
+        public async Task GetAndListApplication()
+        {
+            var client = CreateBatchClient();
+
+            // list out all applications
+            string appID = null;
+            await foreach (BatchApplication item in client.GetApplicationsAsync())
+            {
+                appID = item.Id;
+            }
+            Assert.NotNull(appID);
+
+            // verify we can get an application
+            var application = await client.GetApplicationAsync(appID);
+            Assert.AreEqual(appID, application.Value.Id);
+        }
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/tests/Integration/BatchJobIntegrationTests.cs b/sdk/batch/Azure.Compute.Batch/tests/Integration/BatchJobIntegrationTests.cs
new file mode 100644
index 0000000000000..bc5e1456d290c
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/tests/Integration/BatchJobIntegrationTests.cs
@@ -0,0 +1,173 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.Globalization;
+using System.IO;
+using System.Linq;
+using System.Text;
+using System.Text.Json;
+using System.Threading.Tasks;
+using System.Xml;
+using Azure.Compute.Batch.Tests.Infrastructure;
+using Azure.Core;
+using Azure.Core.TestFramework;
+using NUnit.Framework;
+
+namespace Azure.Compute.Batch.Tests.Integration
+{
+    internal class BatchJobIntegrationTests : BatchLiveTestBase
+    {
+        /// <summary>
+        /// Initializes a new instance of the <see cref="BatchJobIntegrationTests"/> class.
+        /// </summary>
+        /// <param name="isAsync">A flag used by the Azure Core Test Framework to differentiate between tests for asynchronous and synchronous methods.</param>
+        public BatchJobIntegrationTests(bool isAsync, RecordedTestMode? mode = null) : base(isAsync, mode)
+        {
+        }
+
+        /// <summary>
+        /// Initializes a new instance of the <see cref="BatchJobIntegrationTests"/> class.
+        /// </summary>
+        /// <param name="isAsync">A flag used by the Azure Core Test Framework to differentiate between tests for asynchronous and synchronous methods.</param>
+ public BatchJobIntegrationTests(bool isAsync) : base(isAsync) + { + } + + [RecordedTest] + public async Task JobOperations() + { + var client = CreateBatchClient(); + IaasLinuxPoolFixture iaasWindowsPoolFixture = new IaasLinuxPoolFixture(client, "JobOperations", isPlayBack()); + string poolID = iaasWindowsPoolFixture.PoolId; + string jobID = "batchJob1"; + string taskID = "Task1"; + string commandLine = "cmd /c echo Hello World"; + try + { + // create a pool to verify we have something to query for + BatchPool pool = await iaasWindowsPoolFixture.CreatePoolAsync(1); + + BatchPoolInfo batchPoolInfo = new BatchPoolInfo() + { + PoolId = pool.Id + }; + BatchJobCreateContent batchJobCreateContent = new BatchJobCreateContent(jobID, batchPoolInfo) + { + JobPreparationTask = new BatchJobPreparationTask(commandLine), + JobReleaseTask = new BatchJobReleaseTask(commandLine), + }; + Response response = await client.CreateJobAsync(batchJobCreateContent); + + // verify list jobs + BatchJob job = null; + await foreach (BatchJob item in client.GetJobsAsync()) + { + if (item.Id == jobID) + { + job = item; + } + } + + Assert.IsNotNull(job); + Assert.AreEqual(job.OnAllTasksComplete, OnAllBatchTasksComplete.NoAction); + + // verify update job + job.OnAllTasksComplete = OnAllBatchTasksComplete.TerminateJob; + response = await client.ReplaceJobAsync(jobID, job); + job = await client.GetJobAsync(jobID); + + Assert.IsNotNull(job); + Assert.AreEqual(job.OnAllTasksComplete, OnAllBatchTasksComplete.TerminateJob); + + // create a task + BatchTaskCreateContent taskCreateContent = new BatchTaskCreateContent(taskID, commandLine); + response = await client.CreateTaskAsync(jobID, taskCreateContent); + Assert.IsFalse(response.IsError); + + // list task counts + BatchTaskCountsResult batchTaskCountsResult = await client.GetJobTaskCountsAsync(jobID); + Assert.IsNotNull(batchTaskCountsResult); + // need to make a loop and ping GetJobTaskCountsAsync to get the status + //Assert.AreEqual(batchTaskCountsResult.TaskCounts.Active, 1); + + // disable a job + BatchJobDisableContent content = new BatchJobDisableContent(DisableBatchJobOption.Requeue); + response = await client.DisableJobAsync(jobID, content); + Assert.IsFalse(response.IsError); + + // enable a job + response = await client.EnableJobAsync(jobID); + Assert.IsFalse(response.IsError); + + await waitForTasksToComplete(client, jobID, isPlayBack()); + + // get JobPreparationAndReleaseTaskStatuses + int count = 0; + await foreach (BatchJobPreparationAndReleaseTaskStatus item in client.GetJobPreparationAndReleaseTaskStatusesAsync(jobID)) + { + count++; + } + Assert.AreNotEqual(0, count); + + // job terminate + BatchJobTerminateContent parameters = new BatchJobTerminateContent + { + TerminationReason = "", + }; + response = await client.TerminateJobAsync(jobID, parameters); + Assert.IsFalse(response.IsError); + } + finally + { + await client.DeletePoolAsync(poolID); + await client.DeleteJobAsync(jobID); + } + } + + [RecordedTest] + public async Task PatchJob() + { + var client = CreateBatchClient(); + IaasLinuxPoolFixture iaasWindowsPoolFixture = new IaasLinuxPoolFixture(client, "PatchJob", isPlayBack()); + string poolID = iaasWindowsPoolFixture.PoolId; + string jobID = "batchJob2"; + string commandLine = "cmd /c echo Hello World"; + try + { + // create a pool to verify we have something to query for + BatchPool pool = await iaasWindowsPoolFixture.CreatePoolAsync(0); + + BatchPoolInfo batchPoolInfo = new BatchPoolInfo() + { + PoolId = pool.Id + }; + BatchJobCreateContent 
batchJobCreateContent = new BatchJobCreateContent(jobID, batchPoolInfo)
+                {
+                    JobPreparationTask = new BatchJobPreparationTask(commandLine),
+                    JobReleaseTask = new BatchJobReleaseTask(commandLine),
+                };
+                Response response = await client.CreateJobAsync(batchJobCreateContent);
+                Assert.AreEqual(201, response.Status);
+
+                // patch the job with new metadata and verify the change took effect
+                BatchJobUpdateContent batchUpdateContent = new BatchJobUpdateContent();
+                batchUpdateContent.Metadata.Add(new MetadataItem("name", "value"));
+                response = await client.UpdateJobAsync(jobID, batchUpdateContent);
+                Assert.AreEqual(200, response.Status);
+
+                BatchJob job = await client.GetJobAsync(jobID);
+
+                Assert.IsNotNull(job);
+                Assert.AreEqual("value", job.Metadata.First().Value);
+            }
+            finally
+            {
+                await client.DeletePoolAsync(poolID);
+                await client.DeleteJobAsync(jobID);
+            }
+        }
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/tests/Integration/BatchJobScheduleIntegrationTests.cs b/sdk/batch/Azure.Compute.Batch/tests/Integration/BatchJobScheduleIntegrationTests.cs
new file mode 100644
index 0000000000000..0b93058c53fe3
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/tests/Integration/BatchJobScheduleIntegrationTests.cs
@@ -0,0 +1,288 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.Globalization;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+using Azure.Compute.Batch.Tests.Infrastructure;
+using Azure.Core.TestFramework;
+using NUnit.Framework;
+
+namespace Azure.Compute.Batch.Tests.Integration
+{
+    internal class BatchJobScheduleIntegrationTests : BatchLiveTestBase
+    {
+        /// <summary>
+        /// Initializes a new instance of the <see cref="BatchJobScheduleIntegrationTests"/> class.
+        /// </summary>
+        /// <param name="isAsync">A flag used by the Azure Core Test Framework to differentiate between tests for asynchronous and synchronous methods.</param>
+        public BatchJobScheduleIntegrationTests(bool isAsync, RecordedTestMode? mode = null) : base(isAsync, mode)
+        {
+        }
+
+        /// <summary>
+        /// Initializes a new instance of the <see cref="BatchJobScheduleIntegrationTests"/> class.
+        /// </summary>
+        /// <param name="isAsync">A flag used by the Azure Core Test Framework to differentiate between tests for asynchronous and synchronous methods.</param>
+ public BatchJobScheduleIntegrationTests(bool isAsync) : base(isAsync) + { + } + + [RecordedTest] + public async Task JobScheduleAutoPool() + { + var client = CreateBatchClient(); + string jobScheduleId = "jobSchedule1"; + DateTime unboundDNRU = DateTime.Parse("2026-08-18T00:00:00.0000000Z"); + + BatchJobScheduleConfiguration schedule = new BatchJobScheduleConfiguration() + { + DoNotRunUntil = unboundDNRU, + }; + // create a new pool + ImageReference imageReference = new ImageReference() + { + Publisher = "MicrosoftWindowsServer", + Offer = "WindowsServer", + Sku = "2019-datacenter-smalldisk", + Version = "latest" + }; + VirtualMachineConfiguration virtualMachineConfiguration = new VirtualMachineConfiguration(imageReference, "batch.node.windows amd64"); + + BatchPoolSpecification batchPoolSpecification = new BatchPoolSpecification("STANDARD_D1_v2") + { + VirtualMachineConfiguration = virtualMachineConfiguration, + TargetDedicatedNodes = 1, + }; + BatchAutoPoolSpecification autoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.Job) + { + KeepAlive = false, + Pool = batchPoolSpecification, + }; + BatchPoolInfo poolInfo = new BatchPoolInfo() + { + AutoPoolSpecification = autoPoolSpecification, + }; + BatchJobSpecification jobSpecification = new BatchJobSpecification(poolInfo); + + BatchJobScheduleCreateContent jobSchedule = new BatchJobScheduleCreateContent(jobScheduleId, schedule, jobSpecification); + + try + { + Response response = await client.CreateJobScheduleAsync(jobSchedule); + + // check to see if the job schedule exists + bool result = await client.JobScheduleExistsAsync(jobScheduleId); + Assert.True(result); + + // get the job schedule and verify + BatchJobSchedule batchJobSchedule = await client.GetJobScheduleAsync(jobScheduleId); + Assert.NotNull(batchJobSchedule); + Assert.AreEqual(batchJobSchedule.JobSpecification.PoolInfo.AutoPoolSpecification.Pool.VirtualMachineConfiguration.ImageReference.Sku, "2019-datacenter-smalldisk"); + + // disable the schedule + response = await client.DisableJobScheduleAsync(jobScheduleId); + Assert.AreEqual(204, response.Status); + + // enable the schedule + response = await client.EnableJobScheduleAsync(jobScheduleId); + Assert.AreEqual(204, response.Status); + + response = await client.TerminateJobScheduleAsync(jobScheduleId); + Assert.AreEqual(202, response.Status); + } + finally + { + await client.DeleteJobScheduleAsync(jobScheduleId); + } + } + + [RecordedTest] + public async Task GetJobsFromSchedules() + { + var client = CreateBatchClient(); + string jobScheduleId = "jobSchedule2"; + BatchJobScheduleConfiguration schedule = new BatchJobScheduleConfiguration() + ; + // create a new pool + ImageReference imageReference = new ImageReference() + { + Publisher = "MicrosoftWindowsServer", + Offer = "WindowsServer", + Sku = "2019-datacenter-smalldisk", + Version = "latest" + }; + VirtualMachineConfiguration virtualMachineConfiguration = new VirtualMachineConfiguration(imageReference, "batch.node.windows amd64"); + + BatchPoolSpecification batchPoolSpecification = new BatchPoolSpecification("STANDARD_D1_v2") + { + VirtualMachineConfiguration = virtualMachineConfiguration, + TargetDedicatedNodes = 1, + }; + BatchAutoPoolSpecification autoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.Job) + { + KeepAlive = false, + Pool = batchPoolSpecification, + }; + BatchPoolInfo poolInfo = new BatchPoolInfo() + { + AutoPoolSpecification = autoPoolSpecification, + }; + BatchJobManagerTask batchJobManagerTask = new 
BatchJobManagerTask("task1", "cmd / c echo Hello World"); + + BatchJobSpecification jobSpecification = new BatchJobSpecification(poolInfo) + { + JobManagerTask = batchJobManagerTask, + }; + + BatchJobScheduleCreateContent jobSchedule = new BatchJobScheduleCreateContent(jobScheduleId, schedule, jobSpecification); + + try + { + Response response = await client.CreateJobScheduleAsync(jobSchedule); + + // check to see if the job schedule exists via list + bool found = false; + await foreach (BatchJobSchedule item in client.GetJobSchedulesAsync()) + { + if ( item.Id == jobScheduleId) + found = true; + } + + Assert.True(found); + + // update the job schedule + int jobCount = 0; + await foreach (BatchJob item in client.GetJobsFromSchedulesAsync(jobScheduleId)) + { + jobCount++; + } + + Assert.AreEqual(1, jobCount); + } + finally + { + await client.DeleteJobScheduleAsync(jobScheduleId); + } + } + + [RecordedTest] + public async Task JobScheduleUpdate() + { + var client = CreateBatchClient(); + string jobScheduleId = "jobSchedule3"; + DateTime unboundDNRU = DateTime.Parse("2026-08-18T00:00:00.0000000Z"); + BatchJobScheduleConfiguration schedule = new BatchJobScheduleConfiguration() + { + DoNotRunUntil = unboundDNRU, + }; + // create a new pool + ImageReference imageReference = new ImageReference() + { + Publisher = "MicrosoftWindowsServer", + Offer = "WindowsServer", + Sku = "2019-datacenter-smalldisk", + Version = "latest" + }; + VirtualMachineConfiguration virtualMachineConfiguration = new VirtualMachineConfiguration(imageReference, "batch.node.windows amd64"); + + BatchPoolSpecification batchPoolSpecification = new BatchPoolSpecification("STANDARD_D1_v2") + { + VirtualMachineConfiguration = virtualMachineConfiguration, + TargetDedicatedNodes = 1, + }; + BatchAutoPoolSpecification autoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.Job) + { + KeepAlive = false, + Pool = batchPoolSpecification, + }; + BatchPoolInfo poolInfo = new BatchPoolInfo() + { + AutoPoolSpecification = autoPoolSpecification, + }; + BatchJobSpecification jobSpecification = new BatchJobSpecification(poolInfo); + + BatchJobScheduleCreateContent jobSchedule = new BatchJobScheduleCreateContent(jobScheduleId, schedule, jobSpecification); + + try + { + Response response = await client.CreateJobScheduleAsync(jobSchedule); + + BatchJobSchedule batchJobSchedule = await client.GetJobScheduleAsync(jobScheduleId); + Assert.NotNull(batchJobSchedule); + + response = await client.ReplaceJobScheduleAsync(jobScheduleId, batchJobSchedule); + Assert.AreEqual(200, response.Status); + + // blocked due to not having a model + //await client.UpdateJobScheduleAsync() + } + finally + { + await client.DeleteJobScheduleAsync(jobScheduleId); + } + } + + [RecordedTest] + public async Task JobSchedulePatch() + { + var client = CreateBatchClient(); + string jobScheduleId = "jobSchedulePatch"; + DateTime unboundDNRU = DateTime.Parse("2026-08-18T00:00:00.0000000Z"); + BatchJobScheduleConfiguration schedule = new BatchJobScheduleConfiguration() + { + DoNotRunUntil = unboundDNRU, + }; + // create a new pool + ImageReference imageReference = new ImageReference() + { + Publisher = "MicrosoftWindowsServer", + Offer = "WindowsServer", + Sku = "2019-datacenter-smalldisk", + Version = "latest" + }; + VirtualMachineConfiguration virtualMachineConfiguration = new VirtualMachineConfiguration(imageReference, "batch.node.windows amd64"); + + BatchPoolSpecification batchPoolSpecification = new BatchPoolSpecification("STANDARD_D1_v2") + { + 
+                VirtualMachineConfiguration = virtualMachineConfiguration,
+                TargetDedicatedNodes = 0,
+            };
+            BatchAutoPoolSpecification autoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.Job)
+            {
+                KeepAlive = false,
+                Pool = batchPoolSpecification,
+            };
+            BatchPoolInfo poolInfo = new BatchPoolInfo()
+            {
+                AutoPoolSpecification = autoPoolSpecification,
+            };
+            BatchJobSpecification jobSpecification = new BatchJobSpecification(poolInfo);
+
+            BatchJobScheduleCreateContent jobSchedule = new BatchJobScheduleCreateContent(jobScheduleId, schedule, jobSpecification);
+
+            try
+            {
+                Response response = await client.CreateJobScheduleAsync(jobSchedule);
+
+                BatchJobScheduleUpdateContent batchJobScheduleUpdateContent = new BatchJobScheduleUpdateContent();
+                batchJobScheduleUpdateContent.Metadata.Add(new MetadataItem("name", "value"));
+
+                response = await client.UpdateJobScheduleAsync(jobScheduleId, batchJobScheduleUpdateContent);
+                Assert.AreEqual(200, response.Status);
+
+                BatchJobSchedule patchJobSchedule = await client.GetJobScheduleAsync(jobScheduleId);
+
+                Assert.IsNotNull(patchJobSchedule);
+                Assert.AreEqual(patchJobSchedule.Metadata.First().Value, "value");
+            }
+            finally
+            {
+                await client.DeleteJobScheduleAsync(jobScheduleId);
+            }
+        }
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/tests/Integration/BatchNodeIntegrationTests.cs b/sdk/batch/Azure.Compute.Batch/tests/Integration/BatchNodeIntegrationTests.cs
new file mode 100644
index 0000000000000..cd180760bceb3
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/tests/Integration/BatchNodeIntegrationTests.cs
@@ -0,0 +1,255 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.Globalization;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+using Azure.Compute.Batch.Tests.Infrastructure;
+using Azure.Core.TestFramework;
+using NUnit.Framework;
+
+namespace Azure.Compute.Batch.Tests.Integration
+{
+    public class BatchNodeIntegrationTests : BatchLiveTestBase
+    {
+        /// <summary>
+        /// Initializes a new instance of the <see cref="BatchNodeIntegrationTests"/> class.
+        /// </summary>
+        /// <param name="isAsync">A flag used by the Azure Core Test Framework to differentiate between tests for asynchronous and synchronous methods.</param>
+        public BatchNodeIntegrationTests(bool isAsync, RecordedTestMode? mode = null) : base(isAsync, mode)
+        {
+        }
+
+        /// <summary>
+        /// Initializes a new instance of the <see cref="BatchNodeIntegrationTests"/> class.
+        /// </summary>
+        /// <param name="isAsync">A flag used by the Azure Core Test Framework to differentiate between tests for asynchronous and synchronous methods.</param>
+        public BatchNodeIntegrationTests(bool isAsync) : base(isAsync)
+        {
+        }
+
+        [RecordedTest]
+        public async Task ListBatchNode()
+        {
+            var client = CreateBatchClient();
+            IaasLinuxPoolFixture iaasWindowsPoolFixture = new IaasLinuxPoolFixture(client, "ListBatchNode", isPlayBack());
+            var poolID = iaasWindowsPoolFixture.PoolId;
+
+            try
+            {
+                // create a pool to verify we have something to query for
+                BatchPool pool = await iaasWindowsPoolFixture.CreatePoolAsync(2);
+
+                int count = 0;
+                await foreach (BatchNode item in client.GetNodesAsync(poolID))
+                {
+                    count++;
+                }
+
+                // verify we found both pool nodes
+                Assert.AreEqual(2, count);
+            }
+            finally
+            {
+                await client.DeletePoolAsync(poolID);
+            }
+        }
+
+        [RecordedTest]
+        public async Task BatchNodeUser()
+        {
+            var client = CreateBatchClient();
+            IaasLinuxPoolFixture iaasWindowsPoolFixture = new IaasLinuxPoolFixture(client, "BatchNodeUser", isPlayBack());
+            var poolID = iaasWindowsPoolFixture.PoolId;
+            var userName = "User1";
+            var userPassWord = "Password1";
+            var updatedPassWord = "Password2";
+
+            try
+            {
+                // create a pool to verify we have something to query for
+                BatchPool pool = await iaasWindowsPoolFixture.CreatePoolAsync(1);
+
+                string batchNodeID = "";
+                await foreach (BatchNode item in client.GetNodesAsync(poolID))
+                {
+                    batchNodeID = item.Id;
+                }
+                Assert.IsNotEmpty(batchNodeID);
+
+                // create new user
+                BatchNodeUserCreateContent user = new BatchNodeUserCreateContent(userName)
+                {
+                    Password = userPassWord
+                };
+                Response response = await client.CreateNodeUserAsync(poolID, batchNodeID, user);
+                Assert.IsFalse(response.IsError);
+
+                // update the user's password
+                BatchNodeUserUpdateContent content = new BatchNodeUserUpdateContent()
+                {
+                    Password = updatedPassWord
+                };
+                response = await client.ReplaceNodeUserAsync(poolID, batchNodeID, userName, content);
+                Assert.IsFalse(response.IsError);
+
+                // delete the user
+                response = await client.DeleteNodeUserAsync(poolID, batchNodeID, userName);
+                Assert.IsFalse(response.IsError);
+            }
+            finally
+            {
+                await client.DeletePoolAsync(poolID);
+            }
+        }
+
+        [RecordedTest]
+        public async Task RebootBatchNode()
+        {
+            var client = CreateBatchClient();
+            IaasLinuxPoolFixture iaasWindowsPoolFixture = new IaasLinuxPoolFixture(client, "RebootBatchNode", isPlayBack());
+            var poolID = iaasWindowsPoolFixture.PoolId;
+
+            try
+            {
+                // create a pool to verify we have something to query for
+                BatchPool pool = await iaasWindowsPoolFixture.CreatePoolAsync(1);
+
+                string batchNodeID = "";
+                await foreach (BatchNode item in client.GetNodesAsync(poolID))
+                {
+                    batchNodeID = item.Id;
+                }
+                Assert.IsNotEmpty(batchNodeID);
+
+                // reboot node
+                Response response = await client.RebootNodeAsync(poolID, batchNodeID);
+                Assert.IsFalse(response.IsError);
+                await iaasWindowsPoolFixture.WaitForPoolAllocation(client, poolID);
+            }
+            finally
+            {
+                await client.DeletePoolAsync(poolID);
+            }
+        }
+
+        public async Task BatchNodeExtension()
+        {
+            var client = CreateBatchClient();
+            IaasLinuxPoolFixture iaasWindowsPoolFixture = new IaasLinuxPoolFixture(client, "BatchNodeExtension", isPlayBack());
+            var poolID = iaasWindowsPoolFixture.PoolId;
+
+            try
+            {
+                // create a pool to verify we have something to query for
+                BatchPoolCreateContent batchPoolCreateOptions = iaasWindowsPoolFixture.CreatePoolOptions(1);
+                VMExtension vMExtension = new VMExtension("CustomExtension", "Microsoft.Azure.Geneva", "GenevaMonitoring")
+                {
+                    TypeHandlerVersion = "2.16",
+                    AutoUpgradeMinorVersion = true,
+                    EnableAutomaticUpgrade = true,
+                    ProtectedSettings = {},
+                    Settings = {},
+                };
+                batchPoolCreateOptions.VirtualMachineConfiguration.Extensions.Add(vMExtension);
+                Response response = await client.CreatePoolAsync(batchPoolCreateOptions);
+
+                BatchPool pool = await iaasWindowsPoolFixture.WaitForPoolAllocation(client, poolID);
+
+                string batchNodeID = "";
+                await foreach (BatchNode item in client.GetNodesAsync(poolID))
+                {
+                    batchNodeID = item.Id;
+                }
+                Assert.IsNotEmpty(batchNodeID);
+
+                BatchNodeVMExtension batchNodeVMExtension1 = await client.GetNodeExtensionAsync(poolID, batchNodeID, "CustomExtension");
+
+                // list the node's extensions and fetch each one by name
+                await foreach (BatchNodeVMExtension item in client.GetNodeExtensionsAsync(poolID, batchNodeID))
+                {
+                    Assert.NotNull(item);
+                    Assert.IsNotEmpty(item.VmExtension.Name);
+
+                    BatchNodeVMExtension batchNodeVMExtension = await client.GetNodeExtensionAsync(poolID, batchNodeID, item.VmExtension.Name);
+                    Assert.NotNull(batchNodeVMExtension);
+                }
+            }
+            finally
+            {
+                await client.DeletePoolAsync(poolID);
+            }
+        }
+
+        [RecordedTest]
+        public async Task GetRemoteLoginSettings()
+        {
+            var client = CreateBatchClient();
+            IaasLinuxPoolFixture iaasWindowsPoolFixture = new IaasLinuxPoolFixture(client, "GetRemoteLoginSettings", isPlayBack());
+            var poolID = iaasWindowsPoolFixture.PoolId;
+
+            try
+            {
+                // create a pool to verify we have something to query for
+                BatchPool pool = await iaasWindowsPoolFixture.CreatePoolAsync(2);
+
+                string batchNodeID = "";
+                await foreach (BatchNode item in client.GetNodesAsync(poolID))
+                {
+                    batchNodeID = item.Id;
+                }
+                Assert.IsNotEmpty(batchNodeID);
+
+                BatchNodeRemoteLoginSettings batchNodeRemoteLoginSettings = await client.GetNodeRemoteLoginSettingsAsync(poolID, batchNodeID);
+                Assert.NotNull(batchNodeRemoteLoginSettings);
+                Assert.IsNotEmpty(batchNodeRemoteLoginSettings.RemoteLoginIpAddress);
+            }
+            finally
+            {
+                await client.DeletePoolAsync(poolID);
+            }
+        }
+
+        [RecordedTest]
+        public async Task Scheduling()
+        {
+            var client = CreateBatchClient();
+            IaasLinuxPoolFixture iaasWindowsPoolFixture = new IaasLinuxPoolFixture(client, "Scheduling", isPlayBack());
+            var poolID = iaasWindowsPoolFixture.PoolId;
+
+            try
+            {
+                // create a pool to verify we have something to query for
+                BatchPool pool = await iaasWindowsPoolFixture.CreatePoolAsync(1);
+
+                string batchNodeID = "";
+                await foreach (BatchNode item in client.GetNodesAsync(poolID))
+                {
+                    batchNodeID = item.Id;
+                }
+                Assert.IsNotEmpty(batchNodeID);
+                BatchNodeDisableSchedulingContent batchNodeDisableSchedulingContent = new BatchNodeDisableSchedulingContent()
+                {
+                    NodeDisableSchedulingOption = BatchNodeDisableSchedulingOption.TaskCompletion,
+                };
+                Response response = await client.DisableNodeSchedulingAsync(poolID, batchNodeID, batchNodeDisableSchedulingContent);
+                Assert.AreEqual(200, response.Status);
+
+                response = await client.EnableNodeSchedulingAsync(poolID, batchNodeID);
+                Assert.AreEqual(200, response.Status);
+
+                UploadBatchServiceLogsContent uploadBatchServiceLogsContent = new UploadBatchServiceLogsContent("http://fake.com", DateTimeOffset.Parse("2026-05-01T00:00:00.0000000Z"));
+
+                UploadBatchServiceLogsResult uploadBatchServiceLogsResult = await client.UploadNodeLogsAsync(poolID, batchNodeID, uploadBatchServiceLogsContent);
+                Assert.NotNull(uploadBatchServiceLogsResult);
+                Assert.IsNotEmpty(uploadBatchServiceLogsResult.VirtualDirectoryName);
+            }
+            finally
+            {
+                await client.DeletePoolAsync(poolID);
+            }
+        }
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/tests/Integration/BatchPoolIntegrationTests.cs b/sdk/batch/Azure.Compute.Batch/tests/Integration/BatchPoolIntegrationTests.cs
new file mode 100644
index 0000000000000..1382e5ed18fc8
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/tests/Integration/BatchPoolIntegrationTests.cs
@@ -0,0 +1,308 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.Globalization;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+using Azure.Compute.Batch.Tests.Infrastructure;
+using Azure.Core.TestFramework;
+using NUnit.Framework;
+
+namespace Azure.Compute.Batch.Tests.Integration
+{
+    public class BatchPoolIntegrationTests : BatchLiveTestBase
+    {
+        /// <summary>
+        /// Initializes a new instance of the <see cref="BatchPoolIntegrationTests"/> class.
+        /// </summary>
+        /// <param name="isAsync">A flag used by the Azure Core Test Framework to differentiate between tests for asynchronous and synchronous methods.</param>
+        public BatchPoolIntegrationTests(bool isAsync, RecordedTestMode? mode = null) : base(isAsync, mode)
+        {
+        }
+
+        /// <summary>
+        /// Initializes a new instance of the <see cref="BatchPoolIntegrationTests"/> class.
+        /// </summary>
+        /// <param name="isAsync">A flag used by the Azure Core Test Framework to differentiate between tests for asynchronous and synchronous methods.</param>
+        public BatchPoolIntegrationTests(bool isAsync) : base(isAsync)
+        {
+        }
+
+        [RecordedTest]
+        public async Task GetPoolNodeCounts()
+        {
+            var client = CreateBatchClient();
+            IaasLinuxPoolFixture iaasWindowsPoolFixture = new IaasLinuxPoolFixture(client, "GetPoolNodeCounts", isPlayBack());
+            var poolID = iaasWindowsPoolFixture.PoolId;
+
+            try
+            {
+                // create a pool to verify we have something to query for
+                BatchPool pool = await iaasWindowsPoolFixture.CreatePoolAsync();
+
+                int count = 0;
+                bool poolFound = false;
+                await foreach (BatchPoolNodeCounts item in client.GetPoolNodeCountsAsync())
+                {
+                    count++;
+                    poolFound |= pool.Id.Equals(item.PoolId, StringComparison.OrdinalIgnoreCase);
+                }
+
+                // verify we found at least one pool node
+                Assert.AreNotEqual(0, count);
+                Assert.IsTrue(poolFound);
+            }
+            finally
+            {
+                await client.DeletePoolAsync(poolID);
+            }
+        }
+
+        [RecordedTest]
+        public async Task PoolExists()
+        {
+            var client = CreateBatchClient();
+            IaasLinuxPoolFixture iaasWindowsPoolFixture = new IaasLinuxPoolFixture(client, "PoolExists", isPlayBack());
+            var poolID = iaasWindowsPoolFixture.PoolId;
+
+            try
+            {
+                // create a pool to verify we have something to query for
+                BatchPool pool = await iaasWindowsPoolFixture.CreatePoolAsync(0);
+                bool poolExist = await client.PoolExistsAsync(poolID);
+
+                var poolDoesntExist = await client.PoolExistsAsync("fakepool");
+
+                // verify exists
+                Assert.True(poolExist);
+                Assert.False(poolDoesntExist);
+            }
+            finally
+            {
+                await client.DeletePoolAsync(poolID);
+            }
+        }
+
+        [RecordedTest]
+        public async Task PoolGetPoolUsageMetrics()
+        {
+            var client = CreateBatchClient();
+            IaasLinuxPoolFixture iaasWindowsPoolFixture = new IaasLinuxPoolFixture(client, "PoolGetPoolUsageMetrics", isPlayBack());
+            var poolID = iaasWindowsPoolFixture.PoolId;
+
+            try
+            {
+                // create a pool to verify we have something to query for
+                BatchPool pool = await iaasWindowsPoolFixture.CreatePoolAsync(0);
+
+                BatchPoolUsageMetrics expectedItem = null;
+                await foreach (BatchPoolUsageMetrics item in client.GetPoolUsageMetricsAsync())
+                {
+                    expectedItem = item;
+                }
+
+                // verify that some usage exists, we can't predict what usage that might be at the time of the test
+                Assert.NotNull(expectedItem);
+                Assert.IsNotEmpty(expectedItem.PoolId);
+            }
+            finally
+            {
+                await client.DeletePoolAsync(poolID);
+            }
+        }
+
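+        // Editorial note on the AutoScale test further below: autoscale formulas are short
+        // scripts in the Batch autoscale formula language that assign service-defined
+        // variables, for example:
+        //
+        //     $TargetDedicated = 1;
+        //
+        // The evaluation interval is constrained by the service (documented as a minimum
+        // of 5 minutes and a maximum of 168 hours), which is why that test uses a
+        // 6-minute interval and then bumps it by one minute.
+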
+        [RecordedTest]
+        public async Task PoolRemoveNodes()
+        {
+            var client = CreateBatchClient();
+            IaasLinuxPoolFixture iaasWindowsPoolFixture = new IaasLinuxPoolFixture(client, "PoolRemoveNodes", isPlayBack());
+            var poolID = iaasWindowsPoolFixture.PoolId;
+
+            try
+            {
+                // create a pool to verify we have something to query for
+                BatchPool pool = await iaasWindowsPoolFixture.CreatePoolAsync(2);
+                BatchPool originalPool = await client.GetPoolAsync(poolID);
+
+                string batchNodeID = "";
+                int nodeCount = 0;
+                await foreach (BatchNode item in client.GetNodesAsync(poolID))
+                {
+                    nodeCount++;
+                    batchNodeID = item.Id;
+                }
+
+                Assert.AreEqual(2, nodeCount);
+
+                BatchNodeRemoveContent content = new BatchNodeRemoveContent(new string[] { batchNodeID });
+                Response response = await client.RemoveNodesAsync(poolID, content);
+                Assert.AreEqual(202, response.Status);
+
+                BatchPool modifiedPool = await client.GetPoolAsync(poolID);
+
+                // verify the pool is resizing down after the node removal
+                Assert.NotNull(modifiedPool);
+                Assert.AreEqual(AllocationState.Resizing, modifiedPool.AllocationState);
+            }
+            finally
+            {
+                await client.DeletePoolAsync(poolID);
+            }
+        }
+
+        [RecordedTest]
+        public async Task AutoScale()
+        {
+            var client = CreateBatchClient();
+            IaasLinuxPoolFixture iaasWindowsPoolFixture = new IaasLinuxPoolFixture(client, "AutoScale", isPlayBack());
+            var poolID = iaasWindowsPoolFixture.PoolId;
+            string poolASFormulaOrig = "$TargetDedicated = 0;";
+            string poolASFormulaNew = "$TargetDedicated = 1;";
+            TimeSpan evalInterval = TimeSpan.FromMinutes(6);
+
+            try
+            {
+                // create a pool to verify we have something to query for
+                BatchPoolCreateContent batchPoolCreateOptions = iaasWindowsPoolFixture.CreatePoolOptions();
+                batchPoolCreateOptions.EnableAutoScale = true;
+                batchPoolCreateOptions.AutoScaleEvaluationInterval = evalInterval;
+                batchPoolCreateOptions.AutoScaleFormula = poolASFormulaOrig;
+                Response response = await client.CreatePoolAsync(batchPoolCreateOptions);
+                BatchPool autoScalePool = await iaasWindowsPoolFixture.WaitForPoolAllocation(client, iaasWindowsPoolFixture.PoolId);
+
+                // verify autoscale settings
+                Assert.IsTrue(autoScalePool.EnableAutoScale);
+                Assert.AreEqual(autoScalePool.AutoScaleFormula, poolASFormulaOrig);
+
+                // evaluate autoscale formula
+                BatchPoolEvaluateAutoScaleContent batchPoolEvaluateAutoScaleContent = new BatchPoolEvaluateAutoScaleContent(poolASFormulaNew);
+                AutoScaleRun eval = await client.EvaluatePoolAutoScaleAsync(autoScalePool.Id, batchPoolEvaluateAutoScaleContent);
+                Assert.Null(eval.Error);
+
+                // change eval interval
+                TimeSpan newEvalInterval = evalInterval + TimeSpan.FromMinutes(1);
+                BatchPoolEnableAutoScaleContent batchPoolEnableAutoScaleContent = new BatchPoolEnableAutoScaleContent()
+                {
+                    AutoScaleEvaluationInterval = newEvalInterval,
+                    AutoScaleFormula = poolASFormulaNew,
+                };
+
+                // re-enable autoscale with the new settings and verify
+                response = await client.EnablePoolAutoScaleAsync(autoScalePool.Id, batchPoolEnableAutoScaleContent);
+                Assert.AreEqual(200, response.Status);
+                autoScalePool = await client.GetPoolAsync(autoScalePool.Id);
+                Assert.AreEqual(autoScalePool.AutoScaleEvaluationInterval, newEvalInterval);
+                Assert.AreEqual(autoScalePool.AutoScaleFormula, poolASFormulaNew);
+
+                response = await client.DisablePoolAutoScaleAsync(autoScalePool.Id);
+                Assert.AreEqual(200, response.Status);
+            }
+            finally
+            {
+                await client.DeletePoolAsync(poolID);
+            }
+        }
+
+        [RecordedTest]
+        public async Task ResizePool()
+        {
+            var client = CreateBatchClient();
+            IaasLinuxPoolFixture iaasWindowsPoolFixture = new IaasLinuxPoolFixture(client, "ResizePool", isPlayBack());
+            var poolID = iaasWindowsPoolFixture.PoolId;
+
+            try
+            {
+                // create a pool to verify we have something to query for
+                BatchPool resizePool = await iaasWindowsPoolFixture.CreatePoolAsync(0);
+
+                // build the resize request
+                BatchPoolResizeContent resizeContent = new BatchPoolResizeContent()
+                {
+                    TargetDedicatedNodes = 1,
+                    ResizeTimeout = TimeSpan.FromMinutes(10),
+                };
+
+                // resize pool
+                Response response = await client.ResizePoolAsync(poolID, resizeContent);
+                resizePool = await client.GetPoolAsync(poolID);
+                Assert.AreEqual(AllocationState.Resizing, resizePool.AllocationState);
+
+                // stop resizing
+                response = await client.StopPoolResizeAsync(poolID);
+                Assert.AreEqual(202, response.Status);
+            }
+            finally
+            {
+                await client.DeletePoolAsync(poolID);
+            }
+        }
+
+        [RecordedTest]
+        public async Task ReplacePool()
+        {
+            var client = CreateBatchClient();
+            IaasLinuxPoolFixture iaasWindowsPoolFixture = new IaasLinuxPoolFixture(client, "ReplacePool", isPlayBack());
+            var poolID = iaasWindowsPoolFixture.PoolId;
+
+            try
+            {
+                // create a pool to verify we have something to query for
+                BatchPool originalPool = await iaasWindowsPoolFixture.CreatePoolAsync(0);
+
+                // replace pool
+                BatchApplicationPackageReference[] batchApplicationPackageReferences = new BatchApplicationPackageReference[] {
+                    new BatchApplicationPackageReference("dotnotsdkbatchapplication1")
+                    {
+                        Version = "1"
+                    }
+                };
+
+                MetadataItem[] metadataItems = new MetadataItem[] {
+                    new MetadataItem("name", "value")
+                };
+
+                BatchPoolReplaceContent replaceContent = new BatchPoolReplaceContent(batchApplicationPackageReferences, metadataItems);
+                Response response = await client.ReplacePoolPropertiesAsync(poolID, replaceContent);
+                BatchPool replacePool = await client.GetPoolAsync(poolID);
+                Assert.AreEqual(replacePool.Metadata.First().Value, "value");
+            }
+            finally
+            {
+                await client.DeletePoolAsync(poolID);
+            }
+        }
+
+        [RecordedTest]
+        public async Task PatchPool()
+        {
+            var client = CreateBatchClient();
+            IaasLinuxPoolFixture iaasWindowsPoolFixture = new IaasLinuxPoolFixture(client, "PatchPool", isPlayBack());
+            var poolID = iaasWindowsPoolFixture.PoolId;
+
+            try
+            {
+                // create a pool to verify we have something to query for
+                BatchPool originalPool = await iaasWindowsPoolFixture.CreatePoolAsync(0);
+
+                // update pool
+                BatchPoolUpdateContent updateContent = new BatchPoolUpdateContent();
+                updateContent.Metadata.Add(new MetadataItem("name", "value"));
+                updateContent.ApplicationPackageReferences.Add(new BatchApplicationPackageReference("dotnotsdkbatchapplication1")
+                {
+                    Version = "1"
+                });
+
+                Response response = await client.UpdatePoolAsync(poolID, updateContent);
+                BatchPool patchPool = await client.GetPoolAsync(poolID);
+                Assert.AreEqual(patchPool.Metadata.First().Value, "value");
+            }
+            finally
+            {
+                await client.DeletePoolAsync(poolID);
+            }
+        }
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/tests/Integration/BatchTaskIntegrationTests.cs b/sdk/batch/Azure.Compute.Batch/tests/Integration/BatchTaskIntegrationTests.cs
new file mode 100644
index 0000000000000..2d0def90dde30
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/tests/Integration/BatchTaskIntegrationTests.cs
@@ -0,0 +1,282 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.Globalization;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+using Azure.Compute.Batch.Tests.Infrastructure;
+using Azure.Core;
+using Azure.Core.TestFramework;
+using NUnit.Framework;
+
+namespace Azure.Compute.Batch.Tests.Integration
+{
+    internal class BatchTaskIntegrationTests : BatchLiveTestBase
+    {
+        /// <summary>
+        /// Initializes a new instance of the <see cref="BatchTaskIntegrationTests"/> class.
+        /// </summary>
+        /// <param name="isAsync">A flag used by the Azure Core Test Framework to differentiate between tests for asynchronous and synchronous methods.</param>
+        public BatchTaskIntegrationTests(bool isAsync, RecordedTestMode? mode = null) : base(isAsync, mode)
+        {
+        }
+
+        /// <summary>
+        /// Initializes a new instance of the <see cref="BatchTaskIntegrationTests"/> class.
+        /// </summary>
+        /// <param name="isAsync">A flag used by the Azure Core Test Framework to differentiate between tests for asynchronous and synchronous methods.</param>
+        public BatchTaskIntegrationTests(bool isAsync) : base(isAsync)
+        {
+        }
+
+        [RecordedTest]
+        public async Task AddTask()
+        {
+            var client = CreateBatchClient();
+            IaasLinuxPoolFixture iaasWindowsPoolFixture = new IaasLinuxPoolFixture(client, "AddTask", isPlayBack());
+            string poolID = iaasWindowsPoolFixture.PoolId;
+            string jobID = "batchJob1";
+            string taskID = "Task1";
+            string commandLine = "cmd /c echo Hello World";
+            try
+            {
+                // create a pool to verify we have something to query for
+                BatchPool pool = await iaasWindowsPoolFixture.CreatePoolAsync(0);
+
+                BatchPoolInfo batchPoolInfo = new BatchPoolInfo()
+                {
+                    PoolId = pool.Id
+                };
+                BatchJobCreateContent batchJobCreateContent = new BatchJobCreateContent(jobID, batchPoolInfo);
+                Response response = await client.CreateJobAsync(batchJobCreateContent);
+
+                var job = await client.GetJobAsync(jobID);
+                Assert.IsNotNull(job);
+
+                BatchTaskCreateContent taskCreateContent = new BatchTaskCreateContent(taskID, commandLine);
+                response = await client.CreateTaskAsync(jobID, taskCreateContent);
+
+                BatchTask task = await client.GetTaskAsync(jobID, taskID);
+                Assert.IsNotNull(task);
+                Assert.AreEqual(commandLine, task.CommandLine);
+            }
+            finally
+            {
+                await client.DeleteJobAsync(jobID);
+                await client.DeletePoolAsync(poolID);
+            }
+        }
+
+        [RecordedTest]
+        public async Task AddTaskCollection()
+        {
+            var client = CreateBatchClient();
+            IaasLinuxPoolFixture iaasWindowsPoolFixture = new IaasLinuxPoolFixture(client, "AddTaskCollection", isPlayBack());
+            string poolID = iaasWindowsPoolFixture.PoolId;
+            string jobID = "batchJob1";
+            string taskID = "Task1";
+            string commandLine = "cmd /c echo Hello World";
+            try
+            {
+                // create a pool to verify we have something to query for
+                BatchPool pool = await iaasWindowsPoolFixture.CreatePoolAsync(0);
+
+                BatchPoolInfo batchPoolInfo = new BatchPoolInfo()
+                {
+                    PoolId = pool.Id
+                };
+                BatchJobCreateContent batchJobCreateContent = new BatchJobCreateContent(jobID, batchPoolInfo);
+                Response response = await client.CreateJobAsync(batchJobCreateContent);
+
+                var job = await client.GetJobAsync(jobID);
+                Assert.IsNotNull(job);
+
+                BatchTaskGroup taskCollection = new BatchTaskGroup(new BatchTaskCreateContent[]
+                {
+                    new BatchTaskCreateContent(taskID, commandLine)
+                });
+
+                BatchTaskAddCollectionResult batchTaskAddCollectionResult = await client.CreateTaskCollectionAsync(jobID, taskCollection);
+
+                Assert.IsNotNull(batchTaskAddCollectionResult);
+                BatchTaskAddResult batchTaskAddResult = null;
+                foreach (BatchTaskAddResult item in batchTaskAddCollectionResult.Value)
+                {
+                    batchTaskAddResult = item;
+                }
+
+                Assert.IsNotNull(batchTaskAddResult);
+                Assert.AreEqual(batchTaskAddResult.TaskId, taskID);
+            }
+            finally
+            {
+                await client.DeleteJobAsync(jobID);
+                await client.DeletePoolAsync(poolID);
+            }
+        }
+
+        [RecordedTest]
+        public async Task TaskUpdate()
+        {
+            var client = CreateBatchClient();
+            IaasLinuxPoolFixture iaasWindowsPoolFixture = new IaasLinuxPoolFixture(client, "TaskUpdate", isPlayBack());
+            string poolID = iaasWindowsPoolFixture.PoolId;
+            string jobID = "batchJob1";
+            string taskID = "Task1";
+            string commandLine = "cmd /c echo Hello World";
+            try
+            {
+                // create a pool to verify we have something to query for
+                BatchPool pool = await iaasWindowsPoolFixture.CreatePoolAsync(0);
+
+                BatchPoolInfo batchPoolInfo = new BatchPoolInfo()
+                {
+                    PoolId = pool.Id
+                };
+                BatchJobCreateContent batchJobCreateContent = new BatchJobCreateContent(jobID, batchPoolInfo);
+                Response response = await client.CreateJobAsync(batchJobCreateContent);
+
+                var job = await client.GetJobAsync(jobID);
+                Assert.IsNotNull(job);
+
+                BatchTaskCreateContent taskCreateContent = new BatchTaskCreateContent(taskID, commandLine);
+
+                response = await client.CreateTaskAsync(jobID, taskCreateContent);
+                Assert.AreEqual(201, response.Status);
+
+                // get task via lists tasks
+                BatchTask task = null;
+                await foreach (BatchTask item in client.GetTasksAsync(jobID))
+                {
+                    task = item;
+                }
+
+                Assert.IsNotNull(task);
+                Assert.AreEqual(commandLine, task.CommandLine);
+
+                // update task constraints
+                BatchTaskConstraints batchTaskConstraints = new BatchTaskConstraints()
+                {
+                    MaxTaskRetryCount = 3,
+                };
+
+                task.Constraints = batchTaskConstraints;
+                response = await client.ReplaceTaskAsync(jobID, taskID, task);
+                Assert.AreEqual(200, response.Status);
+
+                // verify task got updated
+                BatchTask updatedTask = await client.GetTaskAsync(jobID, taskID);
+                Assert.IsNotNull(updatedTask);
+                Assert.AreEqual(3, updatedTask.Constraints.MaxTaskRetryCount);
+            }
+            finally
+            {
+                await client.DeleteJobAsync(jobID);
+                await client.DeletePoolAsync(poolID);
+            }
+        }
+
+        [RecordedTest]
+        public async Task TaskListSubTasks()
+        {
+            var client = CreateBatchClient();
+            IaasLinuxPoolFixture iaasWindowsPoolFixture = new IaasLinuxPoolFixture(client, "TaskListSubTasks", isPlayBack());
+            string poolID = iaasWindowsPoolFixture.PoolId;
+            string jobID = "batchJob1";
+            string taskID = "Task1";
+            string commandLine = "cmd /c echo Hello World";
+            try
+            {
+                // create a pool to verify we have something to query for
+                BatchPoolCreateContent batchPoolCreateOptions = iaasWindowsPoolFixture.CreatePoolOptions();
+                batchPoolCreateOptions.TargetDedicatedNodes = 3;
+                batchPoolCreateOptions.TaskSlotsPerNode = 1;
+                batchPoolCreateOptions.EnableInterNodeCommunication = true;
+                Response response = await client.CreatePoolAsync(batchPoolCreateOptions);
+                BatchPool pool = await iaasWindowsPoolFixture.WaitForPoolAllocation(client, iaasWindowsPoolFixture.PoolId);
+
+                BatchPoolInfo batchPoolInfo = new BatchPoolInfo()
+                {
+                    PoolId = pool.Id
+                };
+                BatchJobCreateContent batchJobCreateContent = new BatchJobCreateContent(jobID, batchPoolInfo);
+                response = await client.CreateJobAsync(batchJobCreateContent);
+
+                var job = await client.GetJobAsync(jobID);
+                Assert.IsNotNull(job);
+
+                BatchTaskCreateContent taskCreateContent = new BatchTaskCreateContent(taskID, commandLine)
+                {
+                    RequiredSlots = 1,
+                    MultiInstanceSettings = new MultiInstanceSettings(commandLine)
+                    {
+                        NumberOfInstances = 1,
+                    },
+                };
+
+                response = await client.CreateTaskAsync(jobID, taskCreateContent);
+                Assert.AreEqual(201, response.Status);
+
+                // list subtasks
+                int count = 0;
+                await foreach (BatchSubtask item in client.GetSubTasksAsync(jobID, taskID))
+                {
+                    count++;
+                }
+                Assert.AreEqual(0, count);
+            }
+            finally
+            {
+                await client.DeleteJobAsync(jobID);
+                await client.DeletePoolAsync(poolID);
+            }
+        }
+
+        [RecordedTest]
+        public async Task TaskReactive()
+        {
+            var client = CreateBatchClient();
+            IaasLinuxPoolFixture iaasWindowsPoolFixture = new IaasLinuxPoolFixture(client, "TaskReactive", isPlayBack());
+            string poolID = iaasWindowsPoolFixture.PoolId;
+            string jobID = "batchJob1";
+            string taskID = "Task1";
+            string commandLine = "cmd /c echo Hello World";
+            try
+            {
+                // create a pool to verify we have something to query for
+                BatchPool pool = await iaasWindowsPoolFixture.CreatePoolAsync(0);
+
+                BatchPoolInfo batchPoolInfo = new BatchPoolInfo()
+                {
+                    PoolId = pool.Id
+                };
+                BatchJobCreateContent batchJobCreateContent = new BatchJobCreateContent(jobID, batchPoolInfo);
+                Response response = await client.CreateJobAsync(batchJobCreateContent);
+
+                var job = await client.GetJobAsync(jobID);
+                Assert.IsNotNull(job);
+
+                BatchTaskCreateContent taskCreateContent = new BatchTaskCreateContent(taskID, commandLine);
+                response = await client.CreateTaskAsync(jobID, taskCreateContent);
+
+                BatchTask task = await client.GetTaskAsync(jobID, taskID);
+                Assert.IsNotNull(task);
+                Assert.AreEqual(commandLine, task.CommandLine);
+
+                response = await client.TerminateTaskAsync(jobID, taskID);
+                Assert.AreEqual(204, response.Status);
+
+                response = await client.ReactivateTaskAsync(jobID, taskID);
+                Assert.AreEqual(204, response.Status);
+            }
+            finally
+            {
+                await client.DeleteJobAsync(jobID);
+                await client.DeletePoolAsync(poolID);
+            }
+        }
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/tests/Integration/EndToEndIntegrationTests.cs b/sdk/batch/Azure.Compute.Batch/tests/Integration/EndToEndIntegrationTests.cs
new file mode 100644
index 0000000000000..98dab379a7171
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/tests/Integration/EndToEndIntegrationTests.cs
@@ -0,0 +1,26 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Security.Cryptography;
+using System.Text;
+using System.Threading.Tasks;
+using Azure.Compute.Batch.Tests.Infrastructure;
+using Azure.Core.TestFramework;
+using NUnit.Framework;
+
+namespace Azure.Compute.Batch.Tests.Integration
+{
+    internal class EndToEndIntegrationTests : BatchLiveTestBase
+    {
+        public EndToEndIntegrationTests(bool isAsync) : base(isAsync)
+        {
+        }
+
+        public EndToEndIntegrationTests(bool isAsync, RecordedTestMode? mode = null) : base(isAsync, mode)
+        {
+        }
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/tests/Integration/FileIntegrationTests.cs b/sdk/batch/Azure.Compute.Batch/tests/Integration/FileIntegrationTests.cs
new file mode 100644
index 0000000000000..b5ffdaccad116
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/tests/Integration/FileIntegrationTests.cs
@@ -0,0 +1,186 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
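+
+// Editorial note: the tests in this file read task output two ways. The task-file APIs
+// (GetTaskFilePropertiesAsync / GetTaskFileAsync) address a file by job id, task id, and
+// file name such as "stdout.txt", while the node-file APIs walk the node's own directory
+// tree with a path like workitems\batchJob1\job-1\task-0\stdout.txt. The "job-1" segment
+// in that path is generated by the service, so treat the literal path used below as an
+// assumption that happens to hold for these recordings rather than a stable contract.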
+
+using System;
+using System.Collections.Generic;
+using System.Globalization;
+using System.IO;
+using System.Linq;
+using System.Text;
+using System.Text.Json;
+using System.Threading.Tasks;
+using Azure.Compute.Batch.Tests.Infrastructure;
+using Azure.Core;
+using Azure.Core.TestFramework;
+using NUnit.Framework;
+using static System.Net.WebRequestMethods;
+
+namespace Azure.Compute.Batch.Tests.Integration
+{
+    internal class FileIntegrationTests : BatchLiveTestBase
+    {
+        /// <summary>
+        /// Initializes a new instance of the <see cref="FileIntegrationTests"/> class.
+        /// </summary>
+        /// <param name="isAsync">A flag used by the Azure Core Test Framework to differentiate between tests for asynchronous and synchronous methods.</param>
+        public FileIntegrationTests(bool isAsync, RecordedTestMode? mode = null) : base(isAsync, mode)
+        {
+        }
+
+        /// <summary>
+        /// Initializes a new instance of the <see cref="FileIntegrationTests"/> class.
+        /// </summary>
+        /// <param name="isAsync">A flag used by the Azure Core Test Framework to differentiate between tests for asynchronous and synchronous methods.</param>
+        public FileIntegrationTests(bool isAsync) : base(isAsync)
+        {
+        }
+
+        [RecordedTest]
+        public async Task GetTaskFile()
+        {
+            var client = CreateBatchClient();
+            IaasLinuxPoolFixture iaasWindowsPoolFixture = new IaasLinuxPoolFixture(client, "GetTaskFile", isPlayBack());
+            string poolId = iaasWindowsPoolFixture.PoolId;
+            string jobId = "batchJob1";
+            try
+            {
+                // create a pool to verify we have something to query for
+                BatchPool pool = await iaasWindowsPoolFixture.CreatePoolAsync(1);
+
+                await client.CreateJobAsync(new BatchJobCreateContent(jobId, new BatchPoolInfo() { PoolId = poolId }));
+
+                for (int i = 0; i < 5; i++)
+                {
+                    string taskId = $"task-{i}";
+                    await client.CreateTaskAsync(jobId, new BatchTaskCreateContent(taskId, $"cmd /c echo Hello World {taskId}"));
+                }
+
+                await waitForTasksToComplete(client, jobId, isPlayBack());
+                var completedTasks = client.GetTasksAsync(jobId, filter: "state eq 'completed'");
+
+                int index = 0;
+                await foreach (BatchTask t in completedTasks)
+                {
+                    var outputFileName = t.ExecutionInfo.ExitCode == 0 ? "stdout.txt" : "stderr.txt";
"stdout.txt" : "stderr.txt"; + + BatchFileProperties batchFilePropertiesesponse = await client.GetTaskFilePropertiesAsync(jobId, t.Id, outputFileName); + Assert.IsNotNull(batchFilePropertiesesponse); + Assert.IsNotEmpty(batchFilePropertiesesponse.BatchFileUrl); + + BinaryData fileContents = await client.GetTaskFileAsync(jobId, t.Id, outputFileName); + using (var reader = new StreamReader(fileContents.ToStream())) + { + string line = await reader.ReadLineAsync(); + Assert.IsNotEmpty(line); + Assert.AreEqual($"Hello World task-{index++}", line); + } + + // Currently broken + //await foreach (BatchNodeFile item in client.GetTaskFilesAsync(jobId, t.Id)) + //{ + // string url = item.Url; + // long contentLenght = item.Properties.ContentLength; + //} + } + } + finally + { + await client.DeleteJobAsync(jobId); + await client.DeletePoolAsync(poolId); + } + } + + [RecordedTest] + public async Task DeleteTaskFile() + { + var client = CreateBatchClient(); + IaasLinuxPoolFixture iaasWindowsPoolFixture = new IaasLinuxPoolFixture(client, "DeleteTaskFile", isPlayBack()); + string poolId = iaasWindowsPoolFixture.PoolId; + string jobId = "batchJob1"; + string taskId = "batchTask1"; + string outputFileName = "stdout.txt"; + + try + { + // create a pool to verify we have something to query for + BatchPool pool = await iaasWindowsPoolFixture.CreatePoolAsync(1); + + await client.CreateJobAsync(new BatchJobCreateContent(jobId, new BatchPoolInfo() { PoolId = poolId })); + + await client.CreateTaskAsync(jobId, new BatchTaskCreateContent(taskId, $"cmd /c echo Hello World")); + + await waitForTasksToComplete(client, jobId, isPlayBack()); + + BinaryData fileContents = await client.GetTaskFileAsync(jobId, taskId, outputFileName); + using (var reader = new StreamReader(fileContents.ToStream())) + { + string line = await reader.ReadLineAsync(); + Assert.IsNotEmpty(line); + Assert.AreEqual($"Hello World", line); + } + + // delete the file + Response response = await client.DeleteTaskFileAsync(jobId, taskId, outputFileName); + Assert.AreEqual(response.Status, 200); + + //verify deleted, we should get an exception because the file is not found + var ex = Assert.ThrowsAsync(async () => await client.GetTaskFileAsync(jobId, taskId, outputFileName)); + } + finally + { + await client.DeleteJobAsync(jobId); + await client.DeletePoolAsync(poolId); + } + } + + [RecordedTest] + public async Task GetNodeFile() + { + var client = CreateBatchClient(); + IaasLinuxPoolFixture iaasWindowsPoolFixture = new IaasLinuxPoolFixture(client, "GetNodeFile", isPlayBack()); + string poolId = iaasWindowsPoolFixture.PoolId; + string jobId = "batchJob1"; + string file = "workitems\\batchJob1\\job-1\\task-0\\stdout.txt"; + try + { + // create a pool to verify we have something to query for + BatchPool pool = await iaasWindowsPoolFixture.CreatePoolAsync(1); + + await client.CreateJobAsync(new BatchJobCreateContent(jobId, new BatchPoolInfo() { PoolId = poolId })); + + for (int i = 0; i < 5; i++) + { + string taskId = $"task-{i}"; + await client.CreateTaskAsync(jobId, new BatchTaskCreateContent(taskId, $"cmd /c echo Hello World {taskId}")); + } + + await waitForTasksToComplete(client, jobId, isPlayBack()); + + await foreach (BatchNode item in client.GetNodesAsync(poolId)) + { + BatchFileProperties batchFileProperties = await client.GetNodeFilePropertiesAsync(poolId, item.Id, file); + Assert.IsNotNull(batchFileProperties); + Assert.IsNotEmpty(batchFileProperties.BatchFileUrl); + + BinaryData fileContents = await client.GetNodeFileAsync(poolId, item.Id, 
+                    using (var reader = new StreamReader(fileContents.ToStream()))
+                    {
+                        string line = await reader.ReadLineAsync();
+                        Assert.IsNotEmpty(line);
+                        //Assert.AreEqual($"Hello World task-{index++}", line);
+                    }
+
+                    await client.DeleteNodeFileAsync(poolId, item.Id, file);
+
+                    //verify deleted, we should get an exception because the file is not found
+                    var ex = Assert.ThrowsAsync<RequestFailedException>(async () => await client.GetNodeFileAsync(poolId, item.Id, file));
+                }
+            }
+            finally
+            {
+                await client.DeleteJobAsync(jobId);
+                await client.DeletePoolAsync(poolId);
+            }
+        }
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/tests/Integration/MiscIntegrationTests.cs b/sdk/batch/Azure.Compute.Batch/tests/Integration/MiscIntegrationTests.cs
new file mode 100644
index 0000000000000..1e3ac8afc73bc
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/tests/Integration/MiscIntegrationTests.cs
@@ -0,0 +1,48 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+using System;
+using System.Collections.Generic;
+using System.Globalization;
+using System.Linq;
+using System.Text;
+using System.Threading.Tasks;
+using Azure.Compute.Batch.Tests.Infrastructure;
+using Azure.Core.TestFramework;
+using NUnit.Framework;
+
+namespace Azure.Compute.Batch.Tests.Integration
+{
+    internal class MiscIntegrationTests : BatchLiveTestBase
+    {
+        /// <summary>
+        /// Initializes a new instance of the <see cref="MiscIntegrationTests"/> class.
+        /// </summary>
+        /// <param name="isAsync">A flag used by the Azure Core Test Framework to differentiate between tests for asynchronous and synchronous methods.</param>
+        public MiscIntegrationTests(bool isAsync, RecordedTestMode? mode = null) : base(isAsync, mode)
+        {
+        }
+
+        /// <summary>
+        /// Initializes a new instance of the <see cref="MiscIntegrationTests"/> class.
+        /// </summary>
+        /// <param name="isAsync">A flag used by the Azure Core Test Framework to differentiate between tests for asynchronous and synchronous methods.</param>
+        public MiscIntegrationTests(bool isAsync) : base(isAsync)
+        {
+        }
+
+        [RecordedTest]
+        public async Task ListSupportedImages()
+        {
+            var client = CreateBatchClient();
+
+            // get supported images
+            int count = 0;
+            await foreach (BatchSupportedImage item in client.GetSupportedImagesAsync())
+            {
+                count++;
+            }
+            Assert.NotZero(count);
+        }
+    }
+}
diff --git a/sdk/batch/Azure.Compute.Batch/tsp-location.yaml b/sdk/batch/Azure.Compute.Batch/tsp-location.yaml
new file mode 100644
index 0000000000000..399863ae4a83f
--- /dev/null
+++ b/sdk/batch/Azure.Compute.Batch/tsp-location.yaml
@@ -0,0 +1,3 @@
+directory: specification/batch/Azure.Batch
+commit: 9df71d5a717e4ed5e6728e7e6ba2fead60f62243
+repo: Azure/azure-rest-api-specs
diff --git a/sdk/batch/GlobalSuppressions.cs b/sdk/batch/GlobalSuppressions.cs
new file mode 100644
index 0000000000000..6fbc274a95f6e
--- /dev/null
+++ b/sdk/batch/GlobalSuppressions.cs
@@ -0,0 +1,12 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// This file is used by Code Analysis to maintain SuppressMessage
+// attributes that are applied to this project.
+// Project-level suppressions either have no target or are given
+// a specific target and scoped to a namespace, type, member, etc.
+ +using System.Diagnostics.CodeAnalysis; + +[assembly: SuppressMessage("Usage", "AZC0002:DO ensure all service methods, both asynchronous and synchronous, take an optional CancellationToken parameter called cancellationToken.", Justification = "", Scope = "namespaceanddescendants", Target = "~N:Azure.Compute.Batch")] +[assembly: SuppressMessage("Usage", "AZC0012:Avoid single word type names", Justification = "", Scope = "type", Target = "~T:Azure.Compute.Batch.Schedule")] diff --git a/sdk/batch/ci.yml b/sdk/batch/ci.yml index 54d4512ce1c09..5827841bf242b 100644 --- a/sdk/batch/ci.yml +++ b/sdk/batch/ci.yml @@ -32,6 +32,8 @@ extends: ArtifactName: packages BuildSnippets: false Artifacts: + - name: Azure.Compute.Batch + safeName: AzureComputeBatch - name: Microsoft.Azure.Batch safeName: MicrosoftAzureBatch - name: Microsoft.Azure.Batch.FileStaging diff --git a/sdk/batch/test-resources.json b/sdk/batch/test-resources.json index 705b696327d17..f6e02bcc4aa55 100644 --- a/sdk/batch/test-resources.json +++ b/sdk/batch/test-resources.json @@ -11,21 +11,61 @@ }, "variables": { "batch_account_name": "dotnotsdkbatchaccount1", + "batch_storage_name": "dotnotsdkbatchstorage1", "location": "eastus", - "api_version": "2023-11-01" + "api_version": "2023-11-01", + "mgmtApiVersion": "2022-05-01", + "encryption": { + "services": { + "file": { + "enabled": true + }, + "blob": { + "enabled": true + } + }, + "keySource": "Microsoft.Storage" + }, + "networkAcls": { + "bypass": "AzureServices", + "virtualNetworkRules": [], + "ipRules": [], + "defaultAction": "Allow" + } }, "resources": [ + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('batch_storage_name')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot", + "minimumTlsVersion": "TLS1_2", + "allowBlobPublicAccess": true + } + }, { "type": "Microsoft.Batch/batchAccounts", "apiVersion": "[variables('api_version')]", "name": "[variables('batch_account_name')]", "location": "[variables('location')]", - "dependsOn": [], "tags": {}, "identity": { "type": "None" }, "properties": { + "autoStorage": { + "storageAccountId": "[resourceId('Microsoft.Storage/storageAccounts', variables('batch_storage_name'))]" + }, "publicNetworkAccess": "Enabled", "poolAllocationMode": "BatchService", "allowedAuthenticationModes": [ @@ -38,10 +78,17 @@ "defaultAction": "Allow" } } - } + }, + "dependsOn": [ + "[resourceId('Microsoft.Storage/storageAccounts', variables('batch_storage_name'))]" + ] } ], "outputs": { + "batch_storage_name": { + "type": "string", + "value": "[variables('batch_storage_name')]" + }, "batch_account_name": { "type": "string", "value": "[variables('batch_account_name')]"