diff --git a/src/storagecache/HISTORY.rst b/src/storagecache/HISTORY.rst new file mode 100644 index 00000000000..27f152061e8 --- /dev/null +++ b/src/storagecache/HISTORY.rst @@ -0,0 +1,8 @@ +.. :changelog: + +Release History +=============== + +0.1.0 +++++++ +* Initial release. diff --git a/src/storagecache/README.md b/src/storagecache/README.md new file mode 100644 index 00000000000..7807300395a --- /dev/null +++ b/src/storagecache/README.md @@ -0,0 +1,5 @@ +Microsoft Azure CLI 'storagecache' Extension +========================================== + +This package is for the 'storagecache' extension. +i.e. 'az storagecache' diff --git a/src/storagecache/azext_storagecache/__init__.py b/src/storagecache/azext_storagecache/__init__.py new file mode 100644 index 00000000000..86b3f29aca6 --- /dev/null +++ b/src/storagecache/azext_storagecache/__init__.py @@ -0,0 +1,46 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from azure.cli.core import AzCommandsLoader +from azext_storagecache.generated._help import helps # pylint: disable=unused-import + + +class StorageCacheManagementClientCommandsLoader(AzCommandsLoader): + + def __init__(self, cli_ctx=None): + from azure.cli.core.commands import CliCommandType + from azext_storagecache.generated._client_factory import cf_storagecache + storagecache_custom = CliCommandType( + operations_tmpl='azext_storagecache.custom#{}', + client_factory=cf_storagecache) + parent = super(StorageCacheManagementClientCommandsLoader, self) + parent.__init__(cli_ctx=cli_ctx, custom_command_type=storagecache_custom) + + def load_command_table(self, args): + from azext_storagecache.generated.commands import load_command_table + load_command_table(self, args) + try: + from azext_storagecache.manual.commands import load_command_table as load_command_table_manual + load_command_table_manual(self, args) + except ImportError: + pass + return self.command_table + + def load_arguments(self, command): + from azext_storagecache.generated._params import load_arguments + load_arguments(self, command) + try: + from azext_storagecache.manual._params import load_arguments as load_arguments_manual + load_arguments_manual(self, command) + except ImportError: + pass + + +COMMAND_LOADER_CLS = StorageCacheManagementClientCommandsLoader diff --git a/src/storagecache/azext_storagecache/action.py b/src/storagecache/azext_storagecache/action.py new file mode 100644 index 00000000000..a846b2766c4 --- /dev/null +++ b/src/storagecache/azext_storagecache/action.py @@ -0,0 +1,17 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- +# pylint: disable=wildcard-import +# pylint: disable=unused-wildcard-import + +from .generated.action import * # noqa: F403 +try: + from .manual.action import * # noqa: F403 +except ImportError: + pass diff --git a/src/storagecache/azext_storagecache/azext_metadata.json b/src/storagecache/azext_storagecache/azext_metadata.json new file mode 100644 index 00000000000..7b56fb1e11a --- /dev/null +++ b/src/storagecache/azext_storagecache/azext_metadata.json @@ -0,0 +1,4 @@ +{ + "azext.isExperimental": true, + "azext.minCliCoreVersion": "2.3.1" +} \ No newline at end of file diff --git a/src/storagecache/azext_storagecache/custom.py b/src/storagecache/azext_storagecache/custom.py new file mode 100644 index 00000000000..7f31674ce96 --- /dev/null +++ b/src/storagecache/azext_storagecache/custom.py @@ -0,0 +1,17 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- +# pylint: disable=wildcard-import +# pylint: disable=unused-wildcard-import + +from .generated.custom import * # noqa: F403 +try: + from .manual.custom import * # noqa: F403 +except ImportError: + pass diff --git a/src/storagecache/azext_storagecache/generated/__init__.py b/src/storagecache/azext_storagecache/generated/__init__.py new file mode 100644 index 00000000000..ee0c4f36bd0 --- /dev/null +++ b/src/storagecache/azext_storagecache/generated/__init__.py @@ -0,0 +1,12 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +__path__ = __import__('pkgutil').extend_path(__path__, __name__) diff --git a/src/storagecache/azext_storagecache/generated/_client_factory.py b/src/storagecache/azext_storagecache/generated/_client_factory.py new file mode 100644 index 00000000000..8aa1cfa00bd --- /dev/null +++ b/src/storagecache/azext_storagecache/generated/_client_factory.py @@ -0,0 +1,35 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + + +def cf_storagecache(cli_ctx, *_): + from azure.cli.core.commands.client_factory import get_mgmt_service_client + from ..vendored_sdks.storagecache import StorageCacheManagementClient + return get_mgmt_service_client(cli_ctx, StorageCacheManagementClient) + + +def cf_sku(cli_ctx, *_): + return cf_storagecache(cli_ctx).sku + + +def cf_usage_model(cli_ctx, *_): + return cf_storagecache(cli_ctx).usage_model + + +def cf_asc_operation(cli_ctx, *_): + return cf_storagecache(cli_ctx).asc_operation + + +def cf_cache(cli_ctx, *_): + return cf_storagecache(cli_ctx).cache + + +def cf_storage_target(cli_ctx, *_): + return cf_storagecache(cli_ctx).storage_target diff --git a/src/storagecache/azext_storagecache/generated/_help.py b/src/storagecache/azext_storagecache/generated/_help.py new file mode 100644 index 00000000000..198fa979e18 --- /dev/null +++ b/src/storagecache/azext_storagecache/generated/_help.py @@ -0,0 +1,306 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- +# pylint: disable=too-many-lines + +from knack.help_files import helps + + +helps['storagecache sku'] = """ + type: group + short-summary: storagecache sku +""" + +helps['storagecache sku list'] = """ + type: command + short-summary: Get the list of StorageCache.Cache SKUs available to this subscription. + examples: + - name: Skus_List + text: |- + az storagecache sku list +""" + +helps['storagecache usage-model'] = """ + type: group + short-summary: storagecache usage-model +""" + +helps['storagecache usage-model list'] = """ + type: command + short-summary: Get the list of Cache Usage Models available to this subscription. + examples: + - name: UsageModels_List + text: |- + az storagecache usage-model list +""" + +helps['storagecache asc-operation'] = """ + type: group + short-summary: storagecache asc-operation +""" + +helps['storagecache asc-operation show'] = """ + type: command + short-summary: Gets the status of an asynchronous operation for the Azure HPC cache + examples: + - name: AscOperations_Get + text: |- + az storagecache asc-operation show --operation-id "testoperationid" --location "West US" +""" + +helps['storagecache cache'] = """ + type: group + short-summary: storagecache cache +""" + +helps['storagecache cache list'] = """ + type: command + short-summary: Returns all Caches the user has access to under a subscription. + examples: + - name: Caches_ListByResourceGroup + text: |- + az storagecache cache list --resource-group "scgroup" +""" + +helps['storagecache cache show'] = """ + type: command + short-summary: Returns a Cache. + examples: + - name: Caches_Get + text: |- + az storagecache cache show --cache-name "sc1" --resource-group "scgroup" +""" + +helps['storagecache cache create'] = """ + type: command + short-summary: Create or update a Cache. + parameters: + - name: --network-settings + short-summary: Specifies network settings of the cache. + long-summary: | + Usage: --network-settings mtu=XX + + mtu: The IPv4 maximum transmission unit configured for the subnet. 
+ - name: --security-settings + short-summary: Specifies security settings of the cache. + long-summary: | + Usage: --security-settings root-squash=XX + + root-squash: root squash of cache property. + examples: + - name: Caches_CreateOrUpdate + text: |- + az storagecache cache create --location "westus" --cache-size-gb 3072 --subnet "/subscriptions/00000000-\ +0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/sub1" --s\ +ku-name "Standard_2G" --tags "{\\"Dept\\":\\"ContosoAds\\"}" --cache-name "sc1" --resource-group "scgroup" +""" + +helps['storagecache cache update'] = """ + type: command + short-summary: Update a Cache instance. + parameters: + - name: --network-settings + short-summary: Specifies network settings of the cache. + long-summary: | + Usage: --network-settings mtu=XX + + mtu: The IPv4 maximum transmission unit configured for the subnet. + - name: --security-settings + short-summary: Specifies security settings of the cache. + long-summary: | + Usage: --security-settings root-squash=XX + + root-squash: root squash of cache property. + examples: + - name: Caches_Update + text: |- + az storagecache cache update --location "westus" --cache-size-gb 3072 --subnet "/subscriptions/00000000-\ +0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/sub1" --s\ +ku-name "Standard_2G" --tags "{\\"Dept\\":\\"ContosoAds\\"}" --cache-name "sc1" --resource-group "scgroup" +""" + +helps['storagecache cache delete'] = """ + type: command + short-summary: Schedules a Cache for deletion. + examples: + - name: Caches_Delete + text: |- + az storagecache cache delete --cache-name "sc" --resource-group "scgroup" +""" + +helps['storagecache cache flush'] = """ + type: command + short-summary: Tells a Cache to write all dirty data to the Storage Target(s). During the flush, clients will see e\ +rrors returned until the flush is complete. + examples: + - name: Caches_Flush + text: |- + az storagecache cache flush --cache-name "sc" --resource-group "scgroup" +""" + +helps['storagecache cache start'] = """ + type: command + short-summary: Tells a Stopped state Cache to transition to Active state. + examples: + - name: Caches_Start + text: |- + az storagecache cache start --cache-name "sc" --resource-group "scgroup" +""" + +helps['storagecache cache stop'] = """ + type: command + short-summary: Tells an Active Cache to transition to Stopped state. + examples: + - name: Caches_Stop + text: |- + az storagecache cache stop --cache-name "sc" --resource-group "scgroup" +""" + +helps['storagecache cache upgrade-firmware'] = """ + type: command + short-summary: Upgrade a Cache's firmware if a new version is available. Otherwise, this operation has no effect. + examples: + - name: Caches_UpgradeFirmware + text: |- + az storagecache cache upgrade-firmware --cache-name "sc1" --resource-group "scgroup" +""" + +helps['storagecache cache wait'] = """ + type: command + short-summary: Place the CLI in a waiting state until a condition of the storagecache cache is met. + examples: + - name: Pause executing next line of CLI script until the storagecache cache is successfully created. + text: |- + az storagecache cache wait --cache-name "sc1" --resource-group "scgroup" --created + - name: Pause executing next line of CLI script until the storagecache cache is successfully deleted. 
+ text: |- + az storagecache cache wait --cache-name "sc1" --resource-group "scgroup" --deleted +""" + +helps['storagecache storage-target'] = """ + type: group + short-summary: storagecache storage-target +""" + +helps['storagecache storage-target list'] = """ + type: command + short-summary: Returns a list of Storage Targets for the specified Cache. + examples: + - name: StorageTargets_List + text: |- + az storagecache storage-target list --cache-name "sc1" --resource-group "scgroup" +""" + +helps['storagecache storage-target show'] = """ + type: command + short-summary: Returns a Storage Target from a Cache. + examples: + - name: StorageTargets_Get + text: |- + az storagecache storage-target show --cache-name "sc1" --resource-group "scgroup" --name "st1" +""" + +helps['storagecache storage-target create'] = """ + type: command + short-summary: Create or update a Storage Target. This operation is allowed at any time, but if the Cache is down o\ +r unhealthy, the actual creation/modification of the Storage Target may be delayed until the Cache is healthy again. + parameters: + - name: --junctions + short-summary: List of Cache namespace junctions to target for namespace associations. + long-summary: | + Usage: --junctions namespace-path=XX target-path=XX nfs-export=XX + + namespace-path: Namespace path on a Cache for a Storage Target. + target-path: Path in Storage Target to which namespacePath points. + nfs-export: NFS export where targetPath exists. + + Multiple actions can be specified by using more than one --junctions argument. + - name: --nfs3 + short-summary: Properties when targetType is nfs3. + long-summary: | + Usage: --nfs3 target=XX usage-model=XX + + target: IP address or host name of an NFSv3 host (e.g., 10.0.44.44). + usage-model: Identifies the primary usage model to be used for this Storage Target. Get choices from .../us\ +ageModels + - name: --clfs + short-summary: Properties when targetType is clfs. + long-summary: | + Usage: --clfs target=XX + + target: Resource ID of storage container. + examples: + - name: StorageTargets_CreateOrUpdate + text: |- + az storagecache storage-target create --cache-name "sc1" --resource-group "scgroup" --name "st1" --junct\ +ions namespace-path="/path/on/cache" nfs-export="exp1" target-path="/path/on/exp1" --junctions namespace-path="/path2/o\ +n/cache" nfs-export="exp2" target-path="/path2/on/exp2" --nfs3 target="10.0.44.44" usage-model="READ_HEAVY_INFREQ" --ta\ +rget-type "nfs3" +""" + +helps['storagecache storage-target update'] = """ + type: command + short-summary: Create or update a Storage Target. This operation is allowed at any time, but if the Cache is down o\ +r unhealthy, the actual creation/modification of the Storage Target may be delayed until the Cache is healthy again. + parameters: + - name: --junctions + short-summary: List of Cache namespace junctions to target for namespace associations. + long-summary: | + Usage: --junctions namespace-path=XX target-path=XX nfs-export=XX + + namespace-path: Namespace path on a Cache for a Storage Target. + target-path: Path in Storage Target to which namespacePath points. + nfs-export: NFS export where targetPath exists. + + Multiple actions can be specified by using more than one --junctions argument. + - name: --nfs3 + short-summary: Properties when targetType is nfs3. + long-summary: | + Usage: --nfs3 target=XX usage-model=XX + + target: IP address or host name of an NFSv3 host (e.g., 10.0.44.44). 
+ usage-model: Identifies the primary usage model to be used for this Storage Target. Get choices from .../us\ +ageModels + - name: --clfs + short-summary: Properties when targetType is clfs. + long-summary: | + Usage: --clfs target=XX + + target: Resource ID of storage container. + examples: + - name: StorageTargets_CreateOrUpdate + text: |- + az storagecache storage-target update --cache-name "sc1" --resource-group "scgroup" --name "st1" --junct\ +ions namespace-path="/path/on/cache" nfs-export="exp1" target-path="/path/on/exp1" --junctions namespace-path="/path2/o\ +n/cache" nfs-export="exp2" target-path="/path2/on/exp2" --nfs3 target="10.0.44.44" usage-model="READ_HEAVY_INFREQ" --ta\ +rget-type "nfs3" +""" + +helps['storagecache storage-target delete'] = """ + type: command + short-summary: Removes a Storage Target from a Cache. This operation is allowed at any time, but if the Cache is do\ +wn or unhealthy, the actual removal of the Storage Target may be delayed until the Cache is healthy again. Note that if\ + the Cache has data to flush to the Storage Target, the data will be flushed before the Storage Target will be deleted. + examples: + - name: StorageTargets_Delete + text: |- + az storagecache storage-target delete --cache-name "sc1" --resource-group "scgroup" --name "st1" +""" + +helps['storagecache storage-target wait'] = """ + type: command + short-summary: Place the CLI in a waiting state until a condition of the storagecache storage-target is met. + examples: + - name: Pause executing next line of CLI script until the storagecache storage-target is successfully created. + text: |- + az storagecache storage-target wait --cache-name "sc1" --resource-group "scgroup" --name "st1" --created + - name: Pause executing next line of CLI script until the storagecache storage-target is successfully deleted. + text: |- + az storagecache storage-target wait --cache-name "sc1" --resource-group "scgroup" --name "st1" --deleted +""" diff --git a/src/storagecache/azext_storagecache/generated/_params.py b/src/storagecache/azext_storagecache/generated/_params.py new file mode 100644 index 00000000000..75654235f79 --- /dev/null +++ b/src/storagecache/azext_storagecache/generated/_params.py @@ -0,0 +1,186 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- +# pylint: disable=too-many-lines +# pylint: disable=too-many-statements + +from knack.arguments import CLIArgumentType +from azure.cli.core.commands.parameters import ( + tags_type, + get_enum_type, + resource_group_name_type, + get_location_type +) +from azure.cli.core.commands.validators import get_default_location_from_resource_group +from azext_storagecache.action import ( + AddNetworkSettings, + AddSecuritySettings, + AddJunctions, + AddNfs3, + AddClfs +) + + +def load_arguments(self, _): + + with self.argument_context('storagecache sku list') as c: + pass + + with self.argument_context('storagecache usage-model list') as c: + pass + + with self.argument_context('storagecache asc-operation show') as c: + c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') + c.argument('operation_id', help='The operation id which uniquely identifies the asynchronous operation.', + id_part='child_name_1') + + with self.argument_context('storagecache cache list') as c: + c.argument('resource_group_name', resource_group_name_type) + + with self.argument_context('storagecache cache show') as c: + c.argument('resource_group_name', resource_group_name_type) + c.argument('cache_name', help='Name of Cache. Length of name must be not greater than 80 and chars must be in l' + 'ist of [-0-9a-zA-Z_] char class.') + + with self.argument_context('storagecache cache create') as c: + c.argument('resource_group_name', resource_group_name_type) + c.argument('cache_name', help='Name of Cache. Length of name must be not greater than 80 and chars must be in l' + 'ist of [-0-9a-zA-Z_] char class.') + c.argument('tags', tags_type) + c.argument('location', arg_type=get_location_type(self.cli_ctx), + validator=get_default_location_from_resource_group) + c.argument('sku_name', help='SKU name for this Cache.') + c.argument('cache_size_gb', help='The size of this Cache, in GB.') + c.argument('provisioning_state', arg_type=get_enum_type(['Succeeded', 'Failed', 'Cancelled', 'Creating', 'Delet' + 'ing', 'Updating']), help='ARM provisioning state, see https://github.com/Azure/azure-resource-manag' + 'er-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property') + c.argument('subnet', help='Subnet used for the Cache.') + c.argument('network_settings', action=AddNetworkSettings, nargs='+', help='Specifies network settings of the ca' + 'che.') + c.argument('encryption_settings', arg_type=CLIArgumentType(options_list=['--encryption-settings'], help='Specif' + 'ies encryption settings of the cache. Expected value: json-string/@json-file.')) + c.argument('security_settings', action=AddSecuritySettings, nargs='+', help='Specifies security settings of the' + ' cache.') + c.argument('identity_type', arg_type=get_enum_type(['SystemAssigned', 'None']), help='The type of identity used' + ' for the cache') + + with self.argument_context('storagecache cache update') as c: + c.argument('resource_group_name', resource_group_name_type) + c.argument('cache_name', help='Name of Cache. 
Length of name must be not greater than 80 and chars must be in l' + 'ist of [-0-9a-zA-Z_] char class.') + c.argument('tags', tags_type) + c.argument('location', arg_type=get_location_type(self.cli_ctx), + validator=get_default_location_from_resource_group) + c.argument('sku_name', help='SKU name for this Cache.') + c.argument('cache_size_gb', help='The size of this Cache, in GB.') + c.argument('provisioning_state', arg_type=get_enum_type(['Succeeded', 'Failed', 'Cancelled', 'Creating', 'Delet' + 'ing', 'Updating']), help='ARM provisioning state, see https://github.com/Azure/azure-resource-manag' + 'er-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property') + c.argument('subnet', help='Subnet used for the Cache.') + c.argument('network_settings', action=AddNetworkSettings, nargs='+', help='Specifies network settings of the ca' + 'che.') + c.argument('encryption_settings', arg_type=CLIArgumentType(options_list=['--encryption-settings'], help='Specif' + 'ies encryption settings of the cache. Expected value: json-string/@json-file.')) + c.argument('security_settings', action=AddSecuritySettings, nargs='+', help='Specifies security settings of the' + ' cache.') + c.argument('identity_type', arg_type=get_enum_type(['SystemAssigned', 'None']), help='The type of identity used' + ' for the cache') + + with self.argument_context('storagecache cache delete') as c: + c.argument('resource_group_name', resource_group_name_type) + c.argument('cache_name', help='Name of Cache. Length of name must be not greater than 80 and chars must be in l' + 'ist of [-0-9a-zA-Z_] char class.') + + with self.argument_context('storagecache cache flush') as c: + c.argument('resource_group_name', resource_group_name_type) + c.argument('cache_name', help='Name of Cache. Length of name must be not greater than 80 and chars must be in l' + 'ist of [-0-9a-zA-Z_] char class.') + + with self.argument_context('storagecache cache start') as c: + c.argument('resource_group_name', resource_group_name_type) + c.argument('cache_name', help='Name of Cache. Length of name must be not greater than 80 and chars must be in l' + 'ist of [-0-9a-zA-Z_] char class.') + + with self.argument_context('storagecache cache stop') as c: + c.argument('resource_group_name', resource_group_name_type) + c.argument('cache_name', help='Name of Cache. Length of name must be not greater than 80 and chars must be in l' + 'ist of [-0-9a-zA-Z_] char class.') + + with self.argument_context('storagecache cache upgrade-firmware') as c: + c.argument('resource_group_name', resource_group_name_type) + c.argument('cache_name', help='Name of Cache. Length of name must be not greater than 80 and chars must be in l' + 'ist of [-0-9a-zA-Z_] char class.') + + with self.argument_context('storagecache cache wait') as c: + c.argument('resource_group_name', resource_group_name_type) + c.argument('cache_name', help='Name of Cache. Length of name must be not greater than 80 and chars must be in l' + 'ist of [-0-9a-zA-Z_] char class.') + + with self.argument_context('storagecache storage-target list') as c: + c.argument('resource_group_name', resource_group_name_type) + c.argument('cache_name', help='Name of Cache. Length of name must be not greater than 80 and chars must be in l' + 'ist of [-0-9a-zA-Z_] char class.') + + with self.argument_context('storagecache storage-target show') as c: + c.argument('resource_group_name', resource_group_name_type) + c.argument('cache_name', help='Name of Cache. 
Length of name must be not greater than 80 and chars must be in l' + 'ist of [-0-9a-zA-Z_] char class.') + c.argument('storage_target_name', options_list=['--name', '-n'], help='Name of the Storage Target. Length of na' + 'me must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.') + + with self.argument_context('storagecache storage-target create') as c: + c.argument('resource_group_name', resource_group_name_type) + c.argument('cache_name', help='Name of Cache. Length of name must be not greater than 80 and chars must be in l' + 'ist of [-0-9a-zA-Z_] char class.') + c.argument('storage_target_name', options_list=['--name', '-n'], help='Name of the Storage Target. Length of na' + 'me must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.') + c.argument('target_base_type', arg_type=get_enum_type(['nfs3', 'clfs', 'unknown']), help='Type of the Storage T' + 'arget.') + c.argument('junctions', action=AddJunctions, nargs='+', help='List of Cache namespace junctions to target for n' + 'amespace associations.') + c.argument('target_type', help='Type of the Storage Target.') + c.argument('provisioning_state', arg_type=get_enum_type(['Succeeded', 'Failed', 'Cancelled', 'Creating', 'Delet' + 'ing', 'Updating']), help='ARM provisioning state, see https://github.com/Azure/azure-resource-manag' + 'er-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property') + c.argument('nfs3', action=AddNfs3, nargs='+', help='Properties when targetType is nfs3.') + c.argument('clfs', action=AddClfs, nargs='+', help='Properties when targetType is clfs.') + c.argument('unknown', arg_type=CLIArgumentType(options_list=['--unknown'], help='Properties when targetType is ' + 'unknown. Expected value: json-string/@json-file.')) + + with self.argument_context('storagecache storage-target update') as c: + c.argument('resource_group_name', resource_group_name_type) + c.argument('cache_name', help='Name of Cache. Length of name must be not greater than 80 and chars must be in l' + 'ist of [-0-9a-zA-Z_] char class.') + c.argument('storage_target_name', options_list=['--name', '-n'], help='Name of the Storage Target. Length of na' + 'me must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.') + c.argument('target_base_type', arg_type=get_enum_type(['nfs3', 'clfs', 'unknown']), help='Type of the Storage T' + 'arget.') + c.argument('junctions', action=AddJunctions, nargs='+', help='List of Cache namespace junctions to target for n' + 'amespace associations.') + c.argument('target_type', help='Type of the Storage Target.') + c.argument('provisioning_state', arg_type=get_enum_type(['Succeeded', 'Failed', 'Cancelled', 'Creating', 'Delet' + 'ing', 'Updating']), help='ARM provisioning state, see https://github.com/Azure/azure-resource-manag' + 'er-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property') + c.argument('nfs3', action=AddNfs3, nargs='+', help='Properties when targetType is nfs3.') + c.argument('clfs', action=AddClfs, nargs='+', help='Properties when targetType is clfs.') + c.argument('unknown', arg_type=CLIArgumentType(options_list=['--unknown'], help='Properties when targetType is ' + 'unknown. Expected value: json-string/@json-file.')) + + with self.argument_context('storagecache storage-target delete') as c: + c.argument('resource_group_name', resource_group_name_type) + c.argument('cache_name', help='Name of Cache. 
Length of name must be not greater than 80 and chars must be in l' + 'ist of [-0-9a-zA-Z_] char class.') + c.argument('storage_target_name', options_list=['--name', '-n'], help='Name of Storage Target.') + + with self.argument_context('storagecache storage-target wait') as c: + c.argument('resource_group_name', resource_group_name_type) + c.argument('cache_name', help='Name of Cache. Length of name must be not greater than 80 and chars must be in l' + 'ist of [-0-9a-zA-Z_] char class.') + c.argument('storage_target_name', options_list=['--name', '-n'], help='Name of the Storage Target. Length of na' + 'me must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.') diff --git a/src/storagecache/azext_storagecache/generated/_validators.py b/src/storagecache/azext_storagecache/generated/_validators.py new file mode 100644 index 00000000000..e5ac7838677 --- /dev/null +++ b/src/storagecache/azext_storagecache/generated/_validators.py @@ -0,0 +1,9 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- diff --git a/src/storagecache/azext_storagecache/generated/action.py b/src/storagecache/azext_storagecache/generated/action.py new file mode 100644 index 00000000000..85b110dc6cc --- /dev/null +++ b/src/storagecache/azext_storagecache/generated/action.py @@ -0,0 +1,131 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- +# pylint: disable=protected-access + +import argparse +from knack.util import CLIError +from collections import defaultdict + + +class AddNetworkSettings(argparse.Action): + def __call__(self, parser, namespace, values, option_string=None): + action = self.get_action(values, option_string) + namespace.network_settings = action + + def get_action(self, values, option_string): # pylint: disable=no-self-use + try: + properties = defaultdict(list) + for (k, v) in (x.split('=', 1) for x in values): + properties[k].append(v) + properties = dict(properties) + except ValueError: + raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string)) + d = {} + d['mtu'] = 1500 + for k in properties: + kl = k.lower() + v = properties[k] + if kl == 'mtu': + d['mtu'] = v[0] + return d + + +class AddSecuritySettings(argparse.Action): + def __call__(self, parser, namespace, values, option_string=None): + action = self.get_action(values, option_string) + namespace.security_settings = action + + def get_action(self, values, option_string): # pylint: disable=no-self-use + try: + properties = defaultdict(list) + for (k, v) in (x.split('=', 1) for x in values): + properties[k].append(v) + properties = dict(properties) + except ValueError: + raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string)) + d = {} + for k in properties: + kl = k.lower() + v = properties[k] + if kl == 'root-squash': + d['root_squash'] = v[0] + return d + + +class AddJunctions(argparse._AppendAction): + def __call__(self, parser, namespace, values, option_string=None): + action = self.get_action(values, option_string) + super(AddJunctions, self).__call__(parser, namespace, action, option_string) + + def get_action(self, values, option_string): # pylint: disable=no-self-use + try: + properties = defaultdict(list) + for (k, v) in (x.split('=', 1) for x in values): + properties[k].append(v) + properties = dict(properties) + except ValueError: + raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string)) + d = {} + for k in properties: + kl = k.lower() + v = properties[k] + if kl == 'namespace-path': + d['namespace_path'] = v[0] + elif kl == 'target-path': + d['target_path'] = v[0] + elif kl == 'nfs-export': + d['nfs_export'] = v[0] + return d + + +class AddNfs3(argparse.Action): + def __call__(self, parser, namespace, values, option_string=None): + action = self.get_action(values, option_string) + namespace.nfs3 = action + + def get_action(self, values, option_string): # pylint: disable=no-self-use + try: + properties = defaultdict(list) + for (k, v) in (x.split('=', 1) for x in values): + properties[k].append(v) + properties = dict(properties) + except ValueError: + raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string)) + d = {} + for k in properties: + kl = k.lower() + v = properties[k] + if kl == 'target': + d['target'] = v[0] + elif kl == 'usage-model': + d['usage_model'] = v[0] + return d + + +class AddClfs(argparse.Action): + def __call__(self, parser, namespace, values, option_string=None): + action = self.get_action(values, option_string) + namespace.clfs = action + + def get_action(self, values, option_string): # pylint: disable=no-self-use + try: + properties = defaultdict(list) + for (k, v) in (x.split('=', 1) for x in values): + properties[k].append(v) + properties = dict(properties) + except ValueError: + raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string)) + d = {} + for k in 
properties: + kl = k.lower() + v = properties[k] + if kl == 'target': + d['target'] = v[0] + return d diff --git a/src/storagecache/azext_storagecache/generated/commands.py b/src/storagecache/azext_storagecache/generated/commands.py new file mode 100644 index 00000000000..f7117153434 --- /dev/null +++ b/src/storagecache/azext_storagecache/generated/commands.py @@ -0,0 +1,70 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from azure.cli.core.commands import CliCommandType + + +def load_command_table(self, _): + + from azext_storagecache.generated._client_factory import cf_sku + storagecache_sku = CliCommandType( + operations_tmpl='azext_storagecache.vendored_sdks.storagecache.operations._sku_operations#SkuOperations.{}', + client_factory=cf_sku) + with self.command_group('storagecache sku', storagecache_sku, client_factory=cf_sku, is_experimental=True) as g: + g.custom_command('list', 'storagecache_sku_list') + + from azext_storagecache.generated._client_factory import cf_usage_model + storagecache_usage_model = CliCommandType( + operations_tmpl='azext_storagecache.vendored_sdks.storagecache.operations._usage_model_operations#UsageModelOpe' + 'rations.{}', + client_factory=cf_usage_model) + with self.command_group('storagecache usage-model', storagecache_usage_model, client_factory=cf_usage_model, + is_experimental=True) as g: + g.custom_command('list', 'storagecache_usage_model_list') + + from azext_storagecache.generated._client_factory import cf_asc_operation + storagecache_asc_operation = CliCommandType( + operations_tmpl='azext_storagecache.vendored_sdks.storagecache.operations._asc_operation_operations#AscOperatio' + 'nOperations.{}', + client_factory=cf_asc_operation) + with self.command_group('storagecache asc-operation', storagecache_asc_operation, client_factory=cf_asc_operation, + is_experimental=True) as g: + g.custom_show_command('show', 'storagecache_asc_operation_show') + + from azext_storagecache.generated._client_factory import cf_cache + storagecache_cache = CliCommandType( + operations_tmpl='azext_storagecache.vendored_sdks.storagecache.operations._cache_operations#CacheOperations.{}', + client_factory=cf_cache) + with self.command_group('storagecache cache', storagecache_cache, client_factory=cf_cache, + is_experimental=True) as g: + g.custom_command('list', 'storagecache_cache_list') + g.custom_show_command('show', 'storagecache_cache_show') + g.custom_command('create', 'storagecache_cache_create', supports_no_wait=True) + g.custom_command('update', 'storagecache_cache_update') + g.custom_command('delete', 'storagecache_cache_delete', supports_no_wait=True) + g.custom_command('flush', 'storagecache_cache_flush', supports_no_wait=True) + g.custom_command('start', 'storagecache_cache_start', supports_no_wait=True) + g.custom_command('stop', 'storagecache_cache_stop', supports_no_wait=True) + g.custom_command('upgrade-firmware', 'storagecache_cache_upgrade_firmware', supports_no_wait=True) + g.custom_wait_command('wait', 'storagecache_cache_show') + + from azext_storagecache.generated._client_factory import cf_storage_target + storagecache_storage_target = 
CliCommandType( + operations_tmpl='azext_storagecache.vendored_sdks.storagecache.operations._storage_target_operations#StorageTar' + 'getOperations.{}', + client_factory=cf_storage_target) + with self.command_group('storagecache storage-target', storagecache_storage_target, + client_factory=cf_storage_target, is_experimental=True) as g: + g.custom_command('list', 'storagecache_storage_target_list') + g.custom_show_command('show', 'storagecache_storage_target_show') + g.custom_command('create', 'storagecache_storage_target_create', supports_no_wait=True) + g.custom_command('update', 'storagecache_storage_target_update', supports_no_wait=True) + g.custom_command('delete', 'storagecache_storage_target_delete', supports_no_wait=True) + g.custom_wait_command('wait', 'storagecache_storage_target_show') diff --git a/src/storagecache/azext_storagecache/generated/custom.py b/src/storagecache/azext_storagecache/generated/custom.py new file mode 100644 index 00000000000..e0cd1ae208f --- /dev/null +++ b/src/storagecache/azext_storagecache/generated/custom.py @@ -0,0 +1,243 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- +# pylint: disable=too-many-lines + +import json +from azure.cli.core.util import sdk_no_wait + + +def storagecache_sku_list(client): + return client.list() + + +def storagecache_usage_model_list(client): + return client.list() + + +def storagecache_asc_operation_show(client, + location, + operation_id): + return client.get(location=location, + operation_id=operation_id) + + +def storagecache_cache_list(client, + resource_group_name=None): + if resource_group_name: + return client.list_by_resource_group(resource_group_name=resource_group_name) + return client.list() + + +def storagecache_cache_show(client, + resource_group_name, + cache_name): + return client.get(resource_group_name=resource_group_name, + cache_name=cache_name) + + +def storagecache_cache_create(client, + resource_group_name, + cache_name, + tags=None, + location=None, + sku_name=None, + cache_size_gb=None, + provisioning_state=None, + subnet=None, + network_settings=None, + encryption_settings=None, + security_settings=None, + identity_type=None, + no_wait=False): + if isinstance(tags, str): + tags = json.loads(tags) + if isinstance(encryption_settings, str): + encryption_settings = json.loads(encryption_settings) + return sdk_no_wait(no_wait, + client.begin_create_or_update, + resource_group_name=resource_group_name, + cache_name=cache_name, + tags=tags, + location=location, + name=sku_name, + cache_size_gb=cache_size_gb, + provisioning_state=provisioning_state, + subnet=subnet, + upgrade_status=json.loads("{}"), + network_settings=network_settings, + encryption_settings=encryption_settings, + security_settings=security_settings, + type=identity_type) + + +def storagecache_cache_update(client, + resource_group_name, + cache_name, + tags=None, + location=None, + sku_name=None, + cache_size_gb=None, + provisioning_state=None, + subnet=None, + network_settings=None, + encryption_settings=None, + security_settings=None, + identity_type=None): + if isinstance(tags, str): + tags = json.loads(tags) + if 
isinstance(encryption_settings, str): + encryption_settings = json.loads(encryption_settings) + return client.update(resource_group_name=resource_group_name, + cache_name=cache_name, + tags=tags, + location=location, + name=sku_name, + cache_size_gb=cache_size_gb, + provisioning_state=provisioning_state, + subnet=subnet, + upgrade_status=json.loads("{}"), + network_settings=network_settings, + encryption_settings=encryption_settings, + security_settings=security_settings, + type=identity_type) + + +def storagecache_cache_delete(client, + resource_group_name, + cache_name, + no_wait=False): + return sdk_no_wait(no_wait, + client.begin_delete, + resource_group_name=resource_group_name, + cache_name=cache_name) + + +def storagecache_cache_flush(client, + resource_group_name, + cache_name, + no_wait=False): + return sdk_no_wait(no_wait, + client.begin_flush, + resource_group_name=resource_group_name, + cache_name=cache_name) + + +def storagecache_cache_start(client, + resource_group_name, + cache_name, + no_wait=False): + return sdk_no_wait(no_wait, + client.begin_start, + resource_group_name=resource_group_name, + cache_name=cache_name) + + +def storagecache_cache_stop(client, + resource_group_name, + cache_name, + no_wait=False): + return sdk_no_wait(no_wait, + client.begin_stop, + resource_group_name=resource_group_name, + cache_name=cache_name) + + +def storagecache_cache_upgrade_firmware(client, + resource_group_name, + cache_name, + no_wait=False): + return sdk_no_wait(no_wait, + client.begin_upgrade_firmware, + resource_group_name=resource_group_name, + cache_name=cache_name) + + +def storagecache_storage_target_list(client, + resource_group_name, + cache_name): + return client.list_by_cache(resource_group_name=resource_group_name, + cache_name=cache_name) + + +def storagecache_storage_target_show(client, + resource_group_name, + cache_name, + storage_target_name): + return client.get(resource_group_name=resource_group_name, + cache_name=cache_name, + storage_target_name=storage_target_name) + + +def storagecache_storage_target_create(client, + resource_group_name, + cache_name, + storage_target_name, + target_base_type=None, + junctions=None, + target_type=None, + provisioning_state=None, + nfs3=None, + clfs=None, + unknown=None, + no_wait=False): + if isinstance(unknown, str): + unknown = json.loads(unknown) + return sdk_no_wait(no_wait, + client.begin_create_or_update, + resource_group_name=resource_group_name, + cache_name=cache_name, + storage_target_name=storage_target_name, + target_base_type=target_base_type, + junctions=junctions, + target_type=target_type, + provisioning_state=provisioning_state, + nfs3=nfs3, + clfs=clfs, + unknown=unknown) + + +def storagecache_storage_target_update(client, + resource_group_name, + cache_name, + storage_target_name, + target_base_type=None, + junctions=None, + target_type=None, + provisioning_state=None, + nfs3=None, + clfs=None, + unknown=None, + no_wait=False): + if isinstance(unknown, str): + unknown = json.loads(unknown) + return sdk_no_wait(no_wait, + client.begin_create_or_update, + resource_group_name=resource_group_name, + cache_name=cache_name, + storage_target_name=storage_target_name, + target_base_type=target_base_type, + junctions=junctions, + target_type=target_type, + provisioning_state=provisioning_state, + nfs3=nfs3, + clfs=clfs, + unknown=unknown) + + +def storagecache_storage_target_delete(client, + resource_group_name, + cache_name, + storage_target_name, + no_wait=False): + return sdk_no_wait(no_wait, + 
client.begin_delete, + resource_group_name=resource_group_name, + cache_name=cache_name, + storage_target_name=storage_target_name) diff --git a/src/storagecache/azext_storagecache/manual/__init__.py b/src/storagecache/azext_storagecache/manual/__init__.py new file mode 100644 index 00000000000..ee0c4f36bd0 --- /dev/null +++ b/src/storagecache/azext_storagecache/manual/__init__.py @@ -0,0 +1,12 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +__path__ = __import__('pkgutil').extend_path(__path__, __name__) diff --git a/src/storagecache/azext_storagecache/tests/__init__.py b/src/storagecache/azext_storagecache/tests/__init__.py new file mode 100644 index 00000000000..5f8f1fd97ad --- /dev/null +++ b/src/storagecache/azext_storagecache/tests/__init__.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- +import inspect +import os +import sys +import traceback +from azure.core.exceptions import AzureError +from azure.cli.testsdk.exceptions import CliTestError, CliExecutionError, JMESPathCheckAssertionError + + +__path__ = __import__('pkgutil').extend_path(__path__, __name__) +exceptions = [] + + +def try_manual(func): + def import_manual_function(origin_func): + from importlib import import_module + decorated_path = inspect.getfile(origin_func) + module_path = __path__[0] + if not decorated_path.startswith(module_path): + raise Exception("Decorator can only be used in submodules!") + manual_path = os.path.join( + decorated_path[module_path.rfind(os.path.sep) + 1:]) + manual_file_path, manual_file_name = os.path.split(manual_path) + module_name, _ = os.path.splitext(manual_file_name) + manual_module = "..manual." 
+ \ + ".".join(manual_file_path.split(os.path.sep) + [module_name, ]) + return getattr(import_module(manual_module, package=__name__), origin_func.__name__) + + def get_func_to_call(): + func_to_call = func + try: + func_to_call = import_manual_function(func) + print("Found manual override for {}(...)".format(func.__name__)) + except (ImportError, AttributeError): + pass + return func_to_call + + def wrapper(*args, **kwargs): + func_to_call = get_func_to_call() + print("running {}()...".format(func.__name__)) + try: + return func_to_call(*args, **kwargs) + except (AssertionError, AzureError, CliTestError, CliExecutionError, JMESPathCheckAssertionError) as e: + print("--------------------------------------") + print("step exception: ", e) + print("--------------------------------------", file=sys.stderr) + print("step exception in {}: {}".format(func.__name__, e), file=sys.stderr) + traceback.print_exc() + exceptions.append((func.__name__, sys.exc_info())) + + if inspect.isclass(func): + return get_func_to_call() + return wrapper + + +def raise_if(): + if exceptions: + if len(exceptions) <= 1: + raise exceptions[0][1][1] + message = "{}\nFollowed with exceptions in other steps:\n".format(str(exceptions[0][1][1])) + message += "\n".join(["{}: {}".format(h[0], h[1][1]) for h in exceptions[1:]]) + raise exceptions[0][1][0](message).with_traceback(exceptions[0][1][2]) diff --git a/src/storagecache/azext_storagecache/tests/latest/__init__.py b/src/storagecache/azext_storagecache/tests/latest/__init__.py new file mode 100644 index 00000000000..ee0c4f36bd0 --- /dev/null +++ b/src/storagecache/azext_storagecache/tests/latest/__init__.py @@ -0,0 +1,12 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +__path__ = __import__('pkgutil').extend_path(__path__, __name__) diff --git a/src/storagecache/azext_storagecache/tests/latest/preparers.py b/src/storagecache/azext_storagecache/tests/latest/preparers.py new file mode 100644 index 00000000000..4702355b2bd --- /dev/null +++ b/src/storagecache/azext_storagecache/tests/latest/preparers.py @@ -0,0 +1,159 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +import os +from datetime import datetime +from azure_devtools.scenario_tests import SingleValueReplacer +from azure.cli.testsdk.preparers import NoTrafficRecordingPreparer +from azure.cli.testsdk.exceptions import CliTestError +from azure.cli.testsdk.reverse_dependency import get_dummy_cli + + +KEY_RESOURCE_GROUP = 'rg' +KEY_VIRTUAL_NETWORK = 'vnet' +KEY_VNET_SUBNET = 'subnet' +KEY_VNET_NIC = 'nic' + + +class VirtualNetworkPreparer(NoTrafficRecordingPreparer, SingleValueReplacer): + def __init__(self, name_prefix='clitest.vn', + parameter_name='virtual_network', + resource_group_name=None, + resource_group_key=KEY_RESOURCE_GROUP, + dev_setting_name='AZURE_CLI_TEST_DEV_VIRTUAL_NETWORK_NAME', + random_name_length=24, key=KEY_VIRTUAL_NETWORK): + if ' ' in name_prefix: + raise CliTestError( + 'Error: Space character in name prefix \'%s\'' % name_prefix) + super(VirtualNetworkPreparer, self).__init__( + name_prefix, random_name_length) + self.cli_ctx = get_dummy_cli() + self.parameter_name = parameter_name + self.key = key + self.resource_group_name = resource_group_name + self.resource_group_key = resource_group_key + self.dev_setting_name = os.environ.get(dev_setting_name, None) + + def create_resource(self, name, **_): + if self.dev_setting_name: + return {self.parameter_name: self.dev_setting_name, } + + if not self.resource_group_name: + self.resource_group_name = self.test_class_instance.kwargs.get( + self.resource_group_key) + if not self.resource_group_name: + raise CliTestError("Error: No resource group configured!") + + tags = {'product': 'azurecli', 'cause': 'automation', + 'date': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')} + if 'ENV_JOB_NAME' in os.environ: + tags['job'] = os.environ['ENV_JOB_NAME'] + tags = ' '.join(['{}={}'.format(key, value) + for key, value in tags.items()]) + template = 'az network vnet create --resource-group {} --name {} --subnet-name default --tag ' + tags + self.live_only_execute(self.cli_ctx, template.format( + self.resource_group_name, name)) + + self.test_class_instance.kwargs[self.key] = name + return {self.parameter_name: name} + + def remove_resource(self, name, **_): + # delete vnet if test is being recorded and if the vnet is not a dev rg + if not self.dev_setting_name: + self.live_only_execute( + self.cli_ctx, + 'az network vnet delete --name {} --resource-group {}'.format(name, self.resource_group_name)) + + +class VnetSubnetPreparer(NoTrafficRecordingPreparer, SingleValueReplacer): + def __init__(self, name_prefix='clitest.vn', + parameter_name='subnet', + resource_group_key=KEY_RESOURCE_GROUP, + vnet_key=KEY_VIRTUAL_NETWORK, + address_prefixes="11.0.0.0/24", + dev_setting_name='AZURE_CLI_TEST_DEV_VNET_SUBNET_NAME', + key=KEY_VNET_SUBNET): + if ' ' in name_prefix: + raise CliTestError( + 'Error: Space character in name prefix \'%s\'' % name_prefix) + super(VnetSubnetPreparer, self).__init__(name_prefix, 15) + self.cli_ctx = get_dummy_cli() + self.parameter_name = parameter_name + self.key = key + self.resource_group = [resource_group_key, None] + self.vnet = [vnet_key, None] + self.address_prefixes = address_prefixes + self.dev_setting_name = os.environ.get(dev_setting_name, None) + + def create_resource(self, name, **_): + if self.dev_setting_name: + return {self.parameter_name: self.dev_setting_name, } + + if not self.resource_group[1]: + self.resource_group[1] = self.test_class_instance.kwargs.get( + self.resource_group[0]) + if not self.resource_group[1]: 
+ raise CliTestError("Error: No resource group configured!") + if not self.vnet[1]: + self.vnet[1] = self.test_class_instance.kwargs.get(self.vnet[0]) + if not self.vnet[1]: + raise CliTestError("Error: No vnet configured!") + + self.test_class_instance.kwargs[self.key] = 'default' + return {self.parameter_name: name} + + def remove_resource(self, name, **_): + pass + + +class VnetNicPreparer(NoTrafficRecordingPreparer, SingleValueReplacer): + def __init__(self, name_prefix='clitest.nic', + parameter_name='subnet', + resource_group_key=KEY_RESOURCE_GROUP, + vnet_key=KEY_VIRTUAL_NETWORK, + dev_setting_name='AZURE_CLI_TEST_DEV_VNET_NIC_NAME', + key=KEY_VNET_NIC): + if ' ' in name_prefix: + raise CliTestError( + 'Error: Space character in name prefix \'%s\'' % name_prefix) + super(VnetNicPreparer, self).__init__(name_prefix, 15) + self.cli_ctx = get_dummy_cli() + self.parameter_name = parameter_name + self.key = key + self.resource_group = [resource_group_key, None] + self.vnet = [vnet_key, None] + self.dev_setting_name = os.environ.get(dev_setting_name, None) + + def create_resource(self, name, **_): + if self.dev_setting_name: + return {self.parameter_name: self.dev_setting_name, } + + if not self.resource_group[1]: + self.resource_group[1] = self.test_class_instance.kwargs.get( + self.resource_group[0]) + if not self.resource_group[1]: + raise CliTestError("Error: No resource group configured!") + if not self.vnet[1]: + self.vnet[1] = self.test_class_instance.kwargs.get(self.vnet[0]) + if not self.vnet[1]: + raise CliTestError("Error: No vnet configured!") + + template = 'az network nic create --resource-group {} --name {} --vnet-name {} --subnet default ' + self.live_only_execute(self.cli_ctx, template.format( + self.resource_group[1], name, self.vnet[1])) + + self.test_class_instance.kwargs[self.key] = name + return {self.parameter_name: name} + + def remove_resource(self, name, **_): + if not self.dev_setting_name: + self.live_only_execute( + self.cli_ctx, + 'az network nic delete --name {} --resource-group {}'.format(name, self.resource_group[1])) diff --git a/src/storagecache/azext_storagecache/tests/latest/test_storagecache_scenario.py b/src/storagecache/azext_storagecache/tests/latest/test_storagecache_scenario.py new file mode 100644 index 00000000000..9836affcb87 --- /dev/null +++ b/src/storagecache/azext_storagecache/tests/latest/test_storagecache_scenario.py @@ -0,0 +1,240 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import os +from azure.cli.testsdk import ScenarioTest +from .. 
import try_manual, raise_if +from azure.cli.testsdk import ResourceGroupPreparer +from .preparers import VirtualNetworkPreparer + + +TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..')) + + +@try_manual +def setup(test, rg): + pass + + +# EXAMPLE: /AscOperations/get/AscOperations_Get +@try_manual +def step__ascoperations_get_ascoperations_get(test, rg): + test.cmd('az storagecache asc-operation show ' + '--operation-id "testoperationid" ' + '--location "West US"', + checks=[]) + + +# EXAMPLE: /Caches/put/Caches_CreateOrUpdate +@try_manual +def step__caches_put_caches_createorupdate(test, rg): + test.cmd('az storagecache cache create ' + '--location "westus" ' + '--cache-size-gb 3072 ' + '--subnet "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.Network/virtualNetwork' + 's/{vn}/subnets/default" ' + '--sku-name "Standard_2G" ' + '--tags "{{\\"Dept\\":\\"ContosoAds\\"}}" ' + '--cache-name "sc1" ' + '--resource-group "{rg}"', + checks=[]) + + +# EXAMPLE: /Caches/get/Caches_Get +@try_manual +def step__caches_get_caches_get(test, rg): + test.cmd('az storagecache cache show ' + '--cache-name "sc1" ' + '--resource-group "{rg}"', + checks=[]) + + +# EXAMPLE: /Caches/get/Caches_List +@try_manual +def step__caches_get_caches_list(test, rg): + test.cmd('az storagecache cache list ' + '-g ""', + checks=[]) + + +# EXAMPLE: /Caches/get/Caches_ListByResourceGroup +@try_manual +def step__caches_get_caches_listbyresourcegroup(test, rg): + test.cmd('az storagecache cache list ' + '--resource-group "{rg}"', + checks=[]) + + +# EXAMPLE: /Caches/post/Caches_Flush +@try_manual +def step__caches_post_caches_flush(test, rg): + test.cmd('az storagecache cache flush ' + '--cache-name "sc" ' + '--resource-group "{rg}"', + checks=[]) + + +# EXAMPLE: /Caches/post/Caches_Start +@try_manual +def step__caches_post_caches_start(test, rg): + test.cmd('az storagecache cache start ' + '--cache-name "sc" ' + '--resource-group "{rg}"', + checks=[]) + + +# EXAMPLE: /Caches/post/Caches_Stop +@try_manual +def step__caches_post_caches_stop(test, rg): + test.cmd('az storagecache cache stop ' + '--cache-name "sc" ' + '--resource-group "{rg}"', + checks=[]) + + +# EXAMPLE: /Caches/post/Caches_UpgradeFirmware +@try_manual +def step__caches_post_caches_upgradefirmware(test, rg): + test.cmd('az storagecache cache upgrade-firmware ' + '--cache-name "sc1" ' + '--resource-group "{rg}"', + checks=[]) + + +# EXAMPLE: /Caches/patch/Caches_Update +@try_manual +def step__caches_patch_caches_update(test, rg): + test.cmd('az storagecache cache update ' + '--location "westus" ' + '--cache-size-gb 3072 ' + '--subnet "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.Network/virtualNetwork' + 's/{vn}/subnets/default" ' + '--sku-name "Standard_2G" ' + '--tags "{{\\"Dept\\":\\"ContosoAds\\"}}" ' + '--cache-name "sc1" ' + '--resource-group "{rg}"', + checks=[]) + + +# EXAMPLE: /Skus/get/Skus_List +@try_manual +def step__skus_get_skus_list(test, rg): + test.cmd('az storagecache sku list', + checks=[]) + + +# EXAMPLE: /StorageTargets/put/StorageTargets_CreateOrUpdate +@try_manual +def step__storagetargets_put_storagetargets_createorupdate(test, rg): + test.cmd('az storagecache storage-target create ' + '--cache-name "sc1" ' + '--resource-group "{rg}" ' + '--name "{st1}" ' + '--junctions namespace-path="/path/on/cache" nfs-export="exp1" target-path="/path/on/exp1" ' + '--junctions namespace-path="/path2/on/cache" nfs-export="exp2" target-path="/path2/on/exp2" ' + '--nfs3 
target="10.0.44.44" usage-model="READ_HEAVY_INFREQ" ' + '--target-type "nfs3"', + checks=[]) + test.cmd('az storagecache storage-target wait --created ' + '--resource-group "{rg}" ' + '--name "{st1}"', + checks=[]) + + +# EXAMPLE: /StorageTargets/get/StorageTargets_Get +@try_manual +def step__storagetargets_get_storagetargets_get(test, rg): + test.cmd('az storagecache storage-target show ' + '--cache-name "sc1" ' + '--resource-group "{rg}" ' + '--name "{st1}"', + checks=[]) + + +# EXAMPLE: /StorageTargets/get/StorageTargets_List +@try_manual +def step__storagetargets_get_storagetargets_list(test, rg): + test.cmd('az storagecache storage-target list ' + '--cache-name "sc1" ' + '--resource-group "{rg}"', + checks=[]) + + +# EXAMPLE: /UsageModels/get/UsageModels_List +@try_manual +def step__usagemodels_get_usagemodels_list(test, rg): + test.cmd('az storagecache usage-model list', + checks=[]) + + +# EXAMPLE: /Caches/delete/Caches_Delete +@try_manual +def step__caches_delete_caches_delete(test, rg): + test.cmd('az storagecache cache delete ' + '--cache-name "sc" ' + '--resource-group "{rg}"', + checks=[]) + + +# EXAMPLE: /StorageTargets/delete/StorageTargets_Delete +@try_manual +def step__storagetargets_delete_storagetargets_delete(test, rg): + test.cmd('az storagecache storage-target delete ' + '--cache-name "sc1" ' + '--resource-group "{rg}" ' + '--name "{st1}"', + checks=[]) + + +@try_manual +def cleanup(test, rg): + pass + + +@try_manual +def call_scenario(test, rg): + setup(test, rg) + step__ascoperations_get_ascoperations_get(test, rg) + step__caches_put_caches_createorupdate(test, rg) + step__caches_get_caches_get(test, rg) + step__caches_get_caches_list(test, rg) + step__caches_get_caches_listbyresourcegroup(test, rg) + step__caches_post_caches_flush(test, rg) + step__caches_post_caches_start(test, rg) + step__caches_post_caches_stop(test, rg) + step__caches_post_caches_upgradefirmware(test, rg) + step__caches_patch_caches_update(test, rg) + step__skus_get_skus_list(test, rg) + step__storagetargets_put_storagetargets_createorupdate(test, rg) + step__storagetargets_get_storagetargets_get(test, rg) + step__storagetargets_get_storagetargets_list(test, rg) + step__usagemodels_get_usagemodels_list(test, rg) + step__caches_delete_caches_delete(test, rg) + step__storagetargets_delete_storagetargets_delete(test, rg) + cleanup(test, rg) + + +@try_manual +class StorageCacheManagementClientScenarioTest(ScenarioTest): + + @ResourceGroupPreparer(name_prefix='cliteststoragecache_scgroup'[:7], key='rg', parameter_name='rg') + @VirtualNetworkPreparer(name_prefix='cliteststoragecache_scvnet'[:7], key='vn', resource_group_key='rg') + def test_storagecache(self, rg): + + self.kwargs.update({ + 'subscription_id': self.get_subscription_id() + }) + + self.kwargs.update({ + 'st1': 'st1', + }) + + call_scenario(self, rg) + raise_if() diff --git a/src/storagecache/azext_storagecache/vendored_sdks/__init__.py b/src/storagecache/azext_storagecache/vendored_sdks/__init__.py new file mode 100644 index 00000000000..ee0c4f36bd0 --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/__init__.py @@ -0,0 +1,12 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +__path__ = __import__('pkgutil').extend_path(__path__, __name__) diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/__init__.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/__init__.py new file mode 100644 index 00000000000..a7989654b6f --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/__init__.py @@ -0,0 +1,16 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._storage_cache_management_client import StorageCacheManagementClient +__all__ = ['StorageCacheManagementClient'] + +try: + from ._patch import patch_sdk + patch_sdk() +except ImportError: + pass diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/_configuration.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/_configuration.py new file mode 100644 index 00000000000..9c1b0ef356e --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/_configuration.py @@ -0,0 +1,69 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import TYPE_CHECKING + +from azure.core.configuration import Configuration +from azure.core.pipeline import policies + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any + + from azure.core.credentials import TokenCredential + +VERSION = "unknown" + +class StorageCacheManagementClientConfiguration(Configuration): + """Configuration for StorageCacheManagementClient. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param credential: Credential needed for the client to connect to Azure. + :type credential: ~azure.core.credentials.TokenCredential + :param subscription_id: Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call. + :type subscription_id: str + """ + + def __init__( + self, + credential, # type: "TokenCredential" + subscription_id, # type: str + **kwargs # type: Any + ): + # type: (...) 
-> None + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + if subscription_id is None: + raise ValueError("Parameter 'subscription_id' must not be None.") + super(StorageCacheManagementClientConfiguration, self).__init__(**kwargs) + + self.credential = credential + self.subscription_id = subscription_id + self.api_version = "2020-03-01" + self.credential_scopes = ['https://management.azure.com/.default'] + self.credential_scopes.extend(kwargs.pop('credential_scopes', [])) + kwargs.setdefault('sdk_moniker', 'storagecachemanagementclient/{}'.format(VERSION)) + self._configure(**kwargs) + + def _configure( + self, + **kwargs # type: Any + ): + # type: (...) -> None + self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) + self.authentication_policy = kwargs.get('authentication_policy') + if self.credential and not self.authentication_policy: + self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs) diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/_storage_cache_management_client.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/_storage_cache_management_client.py new file mode 100644 index 00000000000..f0c608155a8 --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/_storage_cache_management_client.py @@ -0,0 +1,94 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import TYPE_CHECKING + +from azure.mgmt.core import ARMPipelineClient +from msrest import Deserializer, Serializer + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Optional + + from azure.core.credentials import TokenCredential + +from ._configuration import StorageCacheManagementClientConfiguration +from .operations import OperationOperations +from .operations import SkuOperations +from .operations import UsageModelOperations +from .operations import AscOperationOperations +from .operations import CacheOperations +from .operations import StorageTargetOperations +from . import models + + +class StorageCacheManagementClient(object): + """A Storage Cache provides scalable caching service for NAS clients, serving data from either NFSv3 or Blob at-rest storage (referred to as "Storage Targets"). These operations allow you to manage Caches. 
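+
+    A minimal usage sketch (illustrative only; assumes valid Azure credentials and a
+    placeholder subscription ID, and is not part of the generated client surface)::
+
+        from azure.identity import DefaultAzureCredential
+
+        client = StorageCacheManagementClient(DefaultAzureCredential(), "<subscription-id>")
+        for cache in client.cache.list():
+            print(cache.name)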
+ + :ivar operation: OperationOperations operations + :vartype operation: storage_cache_management_client.operations.OperationOperations + :ivar sku: SkuOperations operations + :vartype sku: storage_cache_management_client.operations.SkuOperations + :ivar usage_model: UsageModelOperations operations + :vartype usage_model: storage_cache_management_client.operations.UsageModelOperations + :ivar asc_operation: AscOperationOperations operations + :vartype asc_operation: storage_cache_management_client.operations.AscOperationOperations + :ivar cache: CacheOperations operations + :vartype cache: storage_cache_management_client.operations.CacheOperations + :ivar storage_target: StorageTargetOperations operations + :vartype storage_target: storage_cache_management_client.operations.StorageTargetOperations + :param credential: Credential needed for the client to connect to Azure. + :type credential: ~azure.core.credentials.TokenCredential + :param subscription_id: Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call. + :type subscription_id: str + :param str base_url: Service URL + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. + """ + + def __init__( + self, + credential, # type: "TokenCredential" + subscription_id, # type: str + base_url=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> None + if not base_url: + base_url = 'https://management.azure.com' + self._config = StorageCacheManagementClientConfiguration(credential, subscription_id, **kwargs) + self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs) + + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + self._serialize = Serializer(client_models) + self._deserialize = Deserializer(client_models) + + self.operation = OperationOperations( + self._client, self._config, self._serialize, self._deserialize) + self.sku = SkuOperations( + self._client, self._config, self._serialize, self._deserialize) + self.usage_model = UsageModelOperations( + self._client, self._config, self._serialize, self._deserialize) + self.asc_operation = AscOperationOperations( + self._client, self._config, self._serialize, self._deserialize) + self.cache = CacheOperations( + self._client, self._config, self._serialize, self._deserialize) + self.storage_target = StorageTargetOperations( + self._client, self._config, self._serialize, self._deserialize) + + def close(self): + # type: () -> None + self._client.close() + + def __enter__(self): + # type: () -> StorageCacheManagementClient + self._client.__enter__() + return self + + def __exit__(self, *exc_details): + # type: (Any) -> None + self._client.__exit__(*exc_details) diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/__init__.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/__init__.py new file mode 100644 index 00000000000..10668c3c519 --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/__init__.py @@ -0,0 +1,10 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._storage_cache_management_client_async import StorageCacheManagementClient +__all__ = ['StorageCacheManagementClient'] diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/_configuration_async.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/_configuration_async.py new file mode 100644 index 00000000000..93f7107aa8d --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/_configuration_async.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import Any, TYPE_CHECKING + +from azure.core.configuration import Configuration +from azure.core.pipeline import policies + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials_async import AsyncTokenCredential + +VERSION = "unknown" + +class StorageCacheManagementClientConfiguration(Configuration): + """Configuration for StorageCacheManagementClient. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param credential: Credential needed for the client to connect to Azure. + :type credential: ~azure.core.credentials_async.AsyncTokenCredential + :param subscription_id: Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call. 
+ :type subscription_id: str + """ + + def __init__( + self, + credential: "AsyncTokenCredential", + subscription_id: str, + **kwargs: Any + ) -> None: + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + if subscription_id is None: + raise ValueError("Parameter 'subscription_id' must not be None.") + super(StorageCacheManagementClientConfiguration, self).__init__(**kwargs) + + self.credential = credential + self.subscription_id = subscription_id + self.api_version = "2020-03-01" + self.credential_scopes = ['https://management.azure.com/.default'] + self.credential_scopes.extend(kwargs.pop('credential_scopes', [])) + kwargs.setdefault('sdk_moniker', 'storagecachemanagementclient/{}'.format(VERSION)) + self._configure(**kwargs) + + def _configure( + self, + **kwargs: Any + ) -> None: + self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) + self.authentication_policy = kwargs.get('authentication_policy') + if self.credential and not self.authentication_policy: + self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs) diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/_storage_cache_management_client_async.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/_storage_cache_management_client_async.py new file mode 100644 index 00000000000..2a4cc3bebb3 --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/_storage_cache_management_client_async.py @@ -0,0 +1,88 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import Any, Optional, TYPE_CHECKING + +from azure.mgmt.core import AsyncARMPipelineClient +from msrest import Deserializer, Serializer + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials_async import AsyncTokenCredential + +from ._configuration_async import StorageCacheManagementClientConfiguration +from .operations_async import OperationOperations +from .operations_async import SkuOperations +from .operations_async import UsageModelOperations +from .operations_async import AscOperationOperations +from .operations_async import CacheOperations +from .operations_async import StorageTargetOperations +from .. import models + + +class StorageCacheManagementClient(object): + """A Storage Cache provides scalable caching service for NAS clients, serving data from either NFSv3 or Blob at-rest storage (referred to as "Storage Targets"). 
These operations allow you to manage Caches. + + :ivar operation: OperationOperations operations + :vartype operation: storage_cache_management_client.aio.operations_async.OperationOperations + :ivar sku: SkuOperations operations + :vartype sku: storage_cache_management_client.aio.operations_async.SkuOperations + :ivar usage_model: UsageModelOperations operations + :vartype usage_model: storage_cache_management_client.aio.operations_async.UsageModelOperations + :ivar asc_operation: AscOperationOperations operations + :vartype asc_operation: storage_cache_management_client.aio.operations_async.AscOperationOperations + :ivar cache: CacheOperations operations + :vartype cache: storage_cache_management_client.aio.operations_async.CacheOperations + :ivar storage_target: StorageTargetOperations operations + :vartype storage_target: storage_cache_management_client.aio.operations_async.StorageTargetOperations + :param credential: Credential needed for the client to connect to Azure. + :type credential: ~azure.core.credentials_async.AsyncTokenCredential + :param subscription_id: Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call. + :type subscription_id: str + :param str base_url: Service URL + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. + """ + + def __init__( + self, + credential: "AsyncTokenCredential", + subscription_id: str, + base_url: Optional[str] = None, + **kwargs: Any + ) -> None: + if not base_url: + base_url = 'https://management.azure.com' + self._config = StorageCacheManagementClientConfiguration(credential, subscription_id, **kwargs) + self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs) + + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + self._serialize = Serializer(client_models) + self._deserialize = Deserializer(client_models) + + self.operation = OperationOperations( + self._client, self._config, self._serialize, self._deserialize) + self.sku = SkuOperations( + self._client, self._config, self._serialize, self._deserialize) + self.usage_model = UsageModelOperations( + self._client, self._config, self._serialize, self._deserialize) + self.asc_operation = AscOperationOperations( + self._client, self._config, self._serialize, self._deserialize) + self.cache = CacheOperations( + self._client, self._config, self._serialize, self._deserialize) + self.storage_target = StorageTargetOperations( + self._client, self._config, self._serialize, self._deserialize) + + async def close(self) -> None: + await self._client.close() + + async def __aenter__(self) -> "StorageCacheManagementClient": + await self._client.__aenter__() + return self + + async def __aexit__(self, *exc_details) -> None: + await self._client.__aexit__(*exc_details) diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations_async/__init__.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations_async/__init__.py new file mode 100644 index 00000000000..e193101b9b8 --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations_async/__init__.py @@ -0,0 +1,23 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._operation_operations_async import OperationOperations +from ._sku_operations_async import SkuOperations +from ._usage_model_operations_async import UsageModelOperations +from ._asc_operation_operations_async import AscOperationOperations +from ._cache_operations_async import CacheOperations +from ._storage_target_operations_async import StorageTargetOperations + +__all__ = [ + 'OperationOperations', + 'SkuOperations', + 'UsageModelOperations', + 'AscOperationOperations', + 'CacheOperations', + 'StorageTargetOperations', +] diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations_async/_asc_operation_operations_async.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations_async/_asc_operation_operations_async.py new file mode 100644 index 00000000000..4c40e9a15f5 --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations_async/_asc_operation_operations_async.py @@ -0,0 +1,97 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import Any, Callable, Dict, Generic, Optional, TypeVar +import warnings + +from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest +from azure.mgmt.core.exceptions import ARMErrorFormat + +from ... import models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class AscOperationOperations: + """AscOperationOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~storage_cache_management_client.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + async def get( + self, + location: str, + operation_id: str, + **kwargs + ) -> "models.AscOperation": + """Gets the status of an asynchronous operation for the Azure HPC cache. + + :param location: The region name which the operation will lookup into. + :type location: str + :param operation_id: The operation id which uniquely identifies the asynchronous operation. 
+ :type operation_id: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: AscOperation, or the result of cls(response) + :rtype: ~storage_cache_management_client.models.AscOperation + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.AscOperation"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + + # Construct URL + url = self.get.metadata['url'] # type: ignore + path_format_arguments = { + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'location': self._serialize.url("location", location, 'str'), + 'operationId': self._serialize.url("operation_id", operation_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize('AscOperation', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.StorageCache/locations/{location}/ascOperations/{operationId}'} # type: ignore diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations_async/_cache_operations_async.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations_async/_cache_operations_async.py new file mode 100644 index 00000000000..275af2f9ae7 --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations_async/_cache_operations_async.py @@ -0,0 +1,1021 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union +import warnings + +from azure.core.async_paging import AsyncItemPaged, AsyncList +from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest +from azure.core.polling import AsyncNoPolling, AsyncPollingMethod, async_poller +from azure.mgmt.core.exceptions import ARMErrorFormat +from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling + +from ... 
import models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class CacheOperations: + """CacheOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~storage_cache_management_client.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def list( + self, + **kwargs + ) -> AsyncIterable["models.CachesListResult"]: + """Returns all Caches the user has access to under a subscription. + + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either CachesListResult or the result of cls(response) + :rtype: ~azure.core.async_paging.AsyncItemPaged[~storage_cache_management_client.models.CachesListResult] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.CachesListResult"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + + def prepare_request(next_link=None): + if not next_link: + # Construct URL + url = self.list.metadata['url'] # type: ignore + path_format_arguments = { + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + else: + url = next_link + query_parameters = {} # type: Dict[str, Any] + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + return request + + async def extract_data(pipeline_response): + deserialized = self._deserialize('CachesListResult', pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) + return deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + request = prepare_request(next_link) + + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged( + get_next, extract_data + ) + list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.StorageCache/caches'} # type: ignore + + def list_by_resource_group( + self, + resource_group_name: str, + **kwargs + ) -> AsyncIterable["models.CachesListResult"]: + """Returns all Caches the user has access to under a resource group. 
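+
+        A minimal usage sketch (illustrative only; the resource group name is a
+        placeholder and ``client`` is an already-constructed async
+        StorageCacheManagementClient)::
+
+            async for cache in client.cache.list_by_resource_group("example-rg"):
+                print(cache.name)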
+ + :param resource_group_name: Target resource group. + :type resource_group_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either CachesListResult or the result of cls(response) + :rtype: ~azure.core.async_paging.AsyncItemPaged[~storage_cache_management_client.models.CachesListResult] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.CachesListResult"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + + def prepare_request(next_link=None): + if not next_link: + # Construct URL + url = self.list_by_resource_group.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + else: + url = next_link + query_parameters = {} # type: Dict[str, Any] + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + return request + + async def extract_data(pipeline_response): + deserialized = self._deserialize('CachesListResult', pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) + return deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + request = prepare_request(next_link) + + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged( + get_next, extract_data + ) + list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches'} # type: ignore + + async def _delete_initial( + self, + resource_group_name: str, + cache_name: str, + **kwargs + ) -> object: + cls = kwargs.pop('cls', None) # type: ClsType[object] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + + # Construct URL + url = self._delete_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, 
Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 204: + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'} # type: ignore + + async def delete( + self, + resource_group_name: str, + cache_name: str, + **kwargs + ) -> object: + """Schedules a Cache for deletion. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. + :return: object, or the result of cls(response) + :rtype: object + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType[object] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + raw_result = await self._delete_initial( + resource_group_name=resource_group_name, + cache_name=cache_name, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) + elif polling is False: polling_method = AsyncNoPolling() + else: polling_method = polling + return await async_poller(self._client, raw_result, get_long_running_output, polling_method) + delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'} # type: ignore + + async def get( + self, + resource_group_name: str, + cache_name: str, + **kwargs + ) -> "models.Cache": + """Returns a Cache. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. 
+ :type cache_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Cache, or the result of cls(response) + :rtype: ~storage_cache_management_client.models.Cache + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.Cache"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + + # Construct URL + url = self.get.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize('Cache', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'} # type: ignore + + async def _create_or_update_initial( + self, + resource_group_name: str, + cache_name: str, + tags: Optional[object] = None, + location: Optional[str] = None, + name: Optional[str] = None, + cache_size_gb: Optional[int] = None, + provisioning_state: Optional[Union[str, "models.ProvisioningStateType"]] = None, + subnet: Optional[str] = None, + upgrade_status: Optional["models.CacheUpgradeStatus"] = None, + network_settings: Optional["models.CacheNetworkSettings"] = None, + encryption_settings: Optional["models.CacheEncryptionSettings"] = None, + security_settings: Optional["models.CacheSecuritySettings"] = None, + type: Optional[Union[str, "models.CacheIdentityType"]] = None, + **kwargs + ) -> "models.Cache": + cls = kwargs.pop('cls', None) # type: ClsType["models.Cache"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + + _cache = models.Cache(tags=tags, location=location, name_sku_name=name, cache_size_gb=cache_size_gb, provisioning_state=provisioning_state, subnet=subnet, upgrade_status=upgrade_status, network_settings=network_settings, encryption_settings=encryption_settings, security_settings=security_settings, type_identity_type=type) + api_version = "2020-03-01" + content_type = kwargs.pop("content_type", "application/json") + + # Construct URL + url = self._create_or_update_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = 'application/json' + + # Construct and send request + body_content_kwargs = {} # type: Dict[str, Any] + if _cache is not None: + body_content = self._serialize.body(_cache, 'Cache') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('Cache', pipeline_response) + + if response.status_code == 201: + deserialized = self._deserialize('Cache', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'} # type: ignore + + async def create_or_update( + self, + resource_group_name: str, + cache_name: str, + tags: Optional[object] = None, + location: Optional[str] = None, + name: Optional[str] = None, + cache_size_gb: Optional[int] = None, + provisioning_state: Optional[Union[str, "models.ProvisioningStateType"]] = None, + subnet: Optional[str] = None, + upgrade_status: Optional["models.CacheUpgradeStatus"] = None, + network_settings: Optional["models.CacheNetworkSettings"] = None, + encryption_settings: Optional["models.CacheEncryptionSettings"] = None, + security_settings: Optional["models.CacheSecuritySettings"] = None, + type: Optional[Union[str, "models.CacheIdentityType"]] = None, + **kwargs + ) -> "models.Cache": + """Create or update a Cache. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :param tags: ARM tags as name/value pairs. + :type tags: object + :param location: Region name string. + :type location: str + :param name: SKU name for this Cache. + :type name: str + :param cache_size_gb: The size of this Cache, in GB. + :type cache_size_gb: int + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param subnet: Subnet used for the Cache. + :type subnet: str + :param upgrade_status: Upgrade status of the Cache. 
+ :type upgrade_status: ~storage_cache_management_client.models.CacheUpgradeStatus + :param network_settings: Specifies network settings of the cache. + :type network_settings: ~storage_cache_management_client.models.CacheNetworkSettings + :param encryption_settings: Specifies encryption settings of the cache. + :type encryption_settings: ~storage_cache_management_client.models.CacheEncryptionSettings + :param security_settings: Specifies security settings of the cache. + :type security_settings: ~storage_cache_management_client.models.CacheSecuritySettings + :param type: The type of identity used for the cache. + :type type: str or ~storage_cache_management_client.models.CacheIdentityType + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. + :return: Cache, or the result of cls(response) + :rtype: ~storage_cache_management_client.models.Cache + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType["models.Cache"] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + raw_result = await self._create_or_update_initial( + resource_group_name=resource_group_name, + cache_name=cache_name, + tags=tags, + location=location, + name=name, + cache_size_gb=cache_size_gb, + provisioning_state=provisioning_state, + subnet=subnet, + upgrade_status=upgrade_status, + network_settings=network_settings, + encryption_settings=encryption_settings, + security_settings=security_settings, + type=type, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('Cache', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) + elif polling is False: polling_method = AsyncNoPolling() + else: polling_method = polling + return await async_poller(self._client, raw_result, get_long_running_output, polling_method) + create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'} # type: ignore + + async def update( + self, + resource_group_name: str, + cache_name: str, + tags: Optional[object] = None, + location: Optional[str] = None, + name: Optional[str] = None, + cache_size_gb: Optional[int] = None, + provisioning_state: Optional[Union[str, "models.ProvisioningStateType"]] = None, + subnet: Optional[str] = None, + upgrade_status: Optional["models.CacheUpgradeStatus"] = None, + network_settings: Optional["models.CacheNetworkSettings"] = None, + encryption_settings: Optional["models.CacheEncryptionSettings"] = None, + security_settings: Optional["models.CacheSecuritySettings"] = None, + type: Optional[Union[str, "models.CacheIdentityType"]] = None, + **kwargs + ) -> "models.Cache": + """Update a Cache instance. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. 
Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :param tags: ARM tags as name/value pairs. + :type tags: object + :param location: Region name string. + :type location: str + :param name: SKU name for this Cache. + :type name: str + :param cache_size_gb: The size of this Cache, in GB. + :type cache_size_gb: int + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param subnet: Subnet used for the Cache. + :type subnet: str + :param upgrade_status: Upgrade status of the Cache. + :type upgrade_status: ~storage_cache_management_client.models.CacheUpgradeStatus + :param network_settings: Specifies network settings of the cache. + :type network_settings: ~storage_cache_management_client.models.CacheNetworkSettings + :param encryption_settings: Specifies encryption settings of the cache. + :type encryption_settings: ~storage_cache_management_client.models.CacheEncryptionSettings + :param security_settings: Specifies security settings of the cache. + :type security_settings: ~storage_cache_management_client.models.CacheSecuritySettings + :param type: The type of identity used for the cache. + :type type: str or ~storage_cache_management_client.models.CacheIdentityType + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Cache, or the result of cls(response) + :rtype: ~storage_cache_management_client.models.Cache + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.Cache"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + + _cache = models.Cache(tags=tags, location=location, name_sku_name=name, cache_size_gb=cache_size_gb, provisioning_state=provisioning_state, subnet=subnet, upgrade_status=upgrade_status, network_settings=network_settings, encryption_settings=encryption_settings, security_settings=security_settings, type_identity_type=type) + api_version = "2020-03-01" + content_type = kwargs.pop("content_type", "application/json") + + # Construct URL + url = self.update.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = 'application/json' + + # Construct and send request + body_content_kwargs = {} # type: Dict[str, Any] + if _cache is not None: + body_content = self._serialize.body(_cache, 'Cache') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) + + 
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize('Cache', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'} # type: ignore + + async def _flush_initial( + self, + resource_group_name: str, + cache_name: str, + **kwargs + ) -> object: + cls = kwargs.pop('cls', None) # type: ClsType[object] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + + # Construct URL + url = self._flush_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 204: + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + _flush_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/flush'} # type: ignore + + async def flush( + self, + resource_group_name: str, + cache_name: str, + **kwargs + ) -> object: + """Tells a Cache to write all dirty data to the Storage Target(s). During the flush, clients will see errors returned until the flush is complete. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. 
+ :type cache_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. + :return: object, or the result of cls(response) + :rtype: object + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType[object] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + raw_result = await self._flush_initial( + resource_group_name=resource_group_name, + cache_name=cache_name, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs) + elif polling is False: polling_method = AsyncNoPolling() + else: polling_method = polling + return await async_poller(self._client, raw_result, get_long_running_output, polling_method) + flush.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/flush'} # type: ignore + + async def _start_initial( + self, + resource_group_name: str, + cache_name: str, + **kwargs + ) -> object: + cls = kwargs.pop('cls', None) # type: ClsType[object] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + + # Construct URL + url = self._start_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 204: + deserialized = self._deserialize('object', pipeline_response) + + if cls: + 
return cls(pipeline_response, deserialized, {}) + + return deserialized + _start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/start'} # type: ignore + + async def start( + self, + resource_group_name: str, + cache_name: str, + **kwargs + ) -> object: + """Tells a Stopped state Cache to transition to Active state. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. + :return: object, or the result of cls(response) + :rtype: object + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType[object] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + raw_result = await self._start_initial( + resource_group_name=resource_group_name, + cache_name=cache_name, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs) + elif polling is False: polling_method = AsyncNoPolling() + else: polling_method = polling + return await async_poller(self._client, raw_result, get_long_running_output, polling_method) + start.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/start'} # type: ignore + + async def _stop_initial( + self, + resource_group_name: str, + cache_name: str, + **kwargs + ) -> object: + cls = kwargs.pop('cls', None) # type: ClsType[object] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + + # Construct URL + url = self._stop_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await 
self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 204: + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + _stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/stop'} # type: ignore + + async def stop( + self, + resource_group_name: str, + cache_name: str, + **kwargs + ) -> object: + """Tells an Active Cache to transition to Stopped state. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. + :return: object, or the result of cls(response) + :rtype: object + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType[object] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + raw_result = await self._stop_initial( + resource_group_name=resource_group_name, + cache_name=cache_name, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs) + elif polling is False: polling_method = AsyncNoPolling() + else: polling_method = polling + return await async_poller(self._client, raw_result, get_long_running_output, polling_method) + stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/stop'} # type: ignore + + async def _upgrade_firmware_initial( + self, + resource_group_name: str, + cache_name: str, + **kwargs + ) -> object: + cls = kwargs.pop('cls', None) # type: ClsType[object] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + + # Construct URL + url = self._upgrade_firmware_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': 
self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = None + if response.status_code == 201: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 204: + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + _upgrade_firmware_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/upgrade'} # type: ignore + + async def upgrade_firmware( + self, + resource_group_name: str, + cache_name: str, + **kwargs + ) -> object: + """Upgrade a Cache's firmware if a new version is available. Otherwise, this operation has no effect. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
+ :return: object, or the result of cls(response) + :rtype: object + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType[object] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + raw_result = await self._upgrade_firmware_initial( + resource_group_name=resource_group_name, + cache_name=cache_name, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs) + elif polling is False: polling_method = AsyncNoPolling() + else: polling_method = polling + return await async_poller(self._client, raw_result, get_long_running_output, polling_method) + upgrade_firmware.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/upgrade'} # type: ignore diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations_async/_operation_operations_async.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations_async/_operation_operations_async.py new file mode 100644 index 00000000000..16f19893c7c --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations_async/_operation_operations_async.py @@ -0,0 +1,101 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar +import warnings + +from azure.core.async_paging import AsyncItemPaged, AsyncList +from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest +from azure.mgmt.core.exceptions import ARMErrorFormat + +from ... import models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class OperationOperations: + """OperationOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~storage_cache_management_client.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. 
+ """ + + models = models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def list( + self, + **kwargs + ) -> AsyncIterable["models.ApiOperationListResult"]: + """Lists all of the available Resource Provider operations. + + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either ApiOperationListResult or the result of cls(response) + :rtype: ~azure.core.async_paging.AsyncItemPaged[~storage_cache_management_client.models.ApiOperationListResult] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.ApiOperationListResult"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + + def prepare_request(next_link=None): + if not next_link: + # Construct URL + url = self.list.metadata['url'] # type: ignore + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + else: + url = next_link + query_parameters = {} # type: Dict[str, Any] + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + return request + + async def extract_data(pipeline_response): + deserialized = self._deserialize('ApiOperationListResult', pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) + return deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + request = prepare_request(next_link) + + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged( + get_next, extract_data + ) + list.metadata = {'url': '/providers/Microsoft.StorageCache/operations'} # type: ignore diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations_async/_sku_operations_async.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations_async/_sku_operations_async.py new file mode 100644 index 00000000000..10f85584a5d --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations_async/_sku_operations_async.py @@ -0,0 +1,105 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar +import warnings + +from azure.core.async_paging import AsyncItemPaged, AsyncList +from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest +from azure.mgmt.core.exceptions import ARMErrorFormat + +from ... import models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class SkuOperations: + """SkuOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~storage_cache_management_client.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def list( + self, + **kwargs + ) -> AsyncIterable["models.ResourceSkusResult"]: + """Get the list of StorageCache.Cache SKUs available to this subscription. + + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either ResourceSkusResult or the result of cls(response) + :rtype: ~azure.core.async_paging.AsyncItemPaged[~storage_cache_management_client.models.ResourceSkusResult] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.ResourceSkusResult"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + + def prepare_request(next_link=None): + if not next_link: + # Construct URL + url = self.list.metadata['url'] # type: ignore + path_format_arguments = { + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + else: + url = next_link + query_parameters = {} # type: Dict[str, Any] + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + return request + + async def extract_data(pipeline_response): + deserialized = self._deserialize('ResourceSkusResult', pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) + return deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + request = prepare_request(next_link) + + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + 
map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged( + get_next, extract_data + ) + list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.StorageCache/skus'} # type: ignore diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations_async/_storage_target_operations_async.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations_async/_storage_target_operations_async.py new file mode 100644 index 00000000000..0a201ab76f1 --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations_async/_storage_target_operations_async.py @@ -0,0 +1,443 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import Any, AsyncIterable, Callable, Dict, Generic, List, Optional, TypeVar, Union +import warnings + +from azure.core.async_paging import AsyncItemPaged, AsyncList +from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest +from azure.core.polling import AsyncNoPolling, AsyncPollingMethod, async_poller +from azure.mgmt.core.exceptions import ARMErrorFormat +from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling + +from ... import models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class StorageTargetOperations: + """StorageTargetOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~storage_cache_management_client.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def list_by_cache( + self, + resource_group_name: str, + cache_name: str, + **kwargs + ) -> AsyncIterable["models.StorageTargetsResult"]: + """Returns a list of Storage Targets for the specified Cache. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. 
+ :type cache_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either StorageTargetsResult or the result of cls(response) + :rtype: ~azure.core.async_paging.AsyncItemPaged[~storage_cache_management_client.models.StorageTargetsResult] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.StorageTargetsResult"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + + def prepare_request(next_link=None): + if not next_link: + # Construct URL + url = self.list_by_cache.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + else: + url = next_link + query_parameters = {} # type: Dict[str, Any] + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + return request + + async def extract_data(pipeline_response): + deserialized = self._deserialize('StorageTargetsResult', pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) + return deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + request = prepare_request(next_link) + + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged( + get_next, extract_data + ) + list_by_cache.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets'} # type: ignore + + async def _delete_initial( + self, + resource_group_name: str, + cache_name: str, + storage_target_name: str, + **kwargs + ) -> object: + cls = kwargs.pop('cls', None) # type: ClsType[object] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + + # Construct URL + url = self._delete_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + 'storageTargetName': self._serialize.url("storage_target_name", storage_target_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # 
Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 204: + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}'} # type: ignore + + async def delete( + self, + resource_group_name: str, + cache_name: str, + storage_target_name: str, + **kwargs + ) -> object: + """Removes a Storage Target from a Cache. This operation is allowed at any time, but if the Cache is down or unhealthy, the actual removal of the Storage Target may be delayed until the Cache is healthy again. Note that if the Cache has data to flush to the Storage Target, the data will be flushed before the Storage Target will be deleted. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :param storage_target_name: Name of Storage Target. + :type storage_target_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
+ :return: object, or the result of cls(response) + :rtype: object + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType[object] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + raw_result = await self._delete_initial( + resource_group_name=resource_group_name, + cache_name=cache_name, + storage_target_name=storage_target_name, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) + elif polling is False: polling_method = AsyncNoPolling() + else: polling_method = polling + return await async_poller(self._client, raw_result, get_long_running_output, polling_method) + delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}'} # type: ignore + + async def get( + self, + resource_group_name: str, + cache_name: str, + storage_target_name: str, + **kwargs + ) -> "models.StorageTarget": + """Returns a Storage Target from a Cache. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :param storage_target_name: Name of the Storage Target. Length of name must be not greater than + 80 and chars must be in list of [-0-9a-zA-Z_] char class. 
+ :type storage_target_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: StorageTarget, or the result of cls(response) + :rtype: ~storage_cache_management_client.models.StorageTarget + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.StorageTarget"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + + # Construct URL + url = self.get.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + 'storageTargetName': self._serialize.url("storage_target_name", storage_target_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize('StorageTarget', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}'} # type: ignore + + async def _create_or_update_initial( + self, + resource_group_name: str, + cache_name: str, + storage_target_name: str, + target_base_type: Optional[Union[str, "models.StorageTargetType"]] = None, + junctions: Optional[List["models.NamespaceJunction"]] = None, + target_type: Optional[str] = None, + provisioning_state: Optional[Union[str, "models.ProvisioningStateType"]] = None, + nfs3: Optional["models.Nfs3Target"] = None, + clfs: Optional["models.ClfsTarget"] = None, + unknown: Optional["models.UnknownTarget"] = None, + **kwargs + ) -> "models.StorageTarget": + cls = kwargs.pop('cls', None) # type: ClsType["models.StorageTarget"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + + _storagetarget = models.StorageTarget(target_base_type=target_base_type, junctions=junctions, target_type=target_type, provisioning_state=provisioning_state, nfs3=nfs3, clfs=clfs, unknown=unknown) + api_version = "2020-03-01" + content_type = kwargs.pop("content_type", "application/json") + + # Construct URL + url = self._create_or_update_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", 
self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + 'storageTargetName': self._serialize.url("storage_target_name", storage_target_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = 'application/json' + + # Construct and send request + body_content_kwargs = {} # type: Dict[str, Any] + if _storagetarget is not None: + body_content = self._serialize.body(_storagetarget, 'StorageTarget') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('StorageTarget', pipeline_response) + + if response.status_code == 201: + deserialized = self._deserialize('StorageTarget', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}'} # type: ignore + + async def create_or_update( + self, + resource_group_name: str, + cache_name: str, + storage_target_name: str, + target_base_type: Optional[Union[str, "models.StorageTargetType"]] = None, + junctions: Optional[List["models.NamespaceJunction"]] = None, + target_type: Optional[str] = None, + provisioning_state: Optional[Union[str, "models.ProvisioningStateType"]] = None, + nfs3: Optional["models.Nfs3Target"] = None, + clfs: Optional["models.ClfsTarget"] = None, + unknown: Optional["models.UnknownTarget"] = None, + **kwargs + ) -> "models.StorageTarget": + """Create or update a Storage Target. This operation is allowed at any time, but if the Cache is down or unhealthy, the actual creation/modification of the Storage Target may be delayed until the Cache is healthy again. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :param storage_target_name: Name of the Storage Target. Length of name must be not greater than + 80 and chars must be in list of [-0-9a-zA-Z_] char class. + :type storage_target_name: str + :param target_base_type: Type of the Storage Target. + :type target_base_type: str or ~storage_cache_management_client.models.StorageTargetType + :param junctions: List of Cache namespace junctions to target for namespace associations. 
+ :type junctions: list[~storage_cache_management_client.models.NamespaceJunction] + :param target_type: Type of the Storage Target. + :type target_type: str + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param nfs3: Properties when targetType is nfs3. + :type nfs3: ~storage_cache_management_client.models.Nfs3Target + :param clfs: Properties when targetType is clfs. + :type clfs: ~storage_cache_management_client.models.ClfsTarget + :param unknown: Properties when targetType is unknown. + :type unknown: ~storage_cache_management_client.models.UnknownTarget + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. + :return: StorageTarget, or the result of cls(response) + :rtype: ~storage_cache_management_client.models.StorageTarget + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType["models.StorageTarget"] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + raw_result = await self._create_or_update_initial( + resource_group_name=resource_group_name, + cache_name=cache_name, + storage_target_name=storage_target_name, + target_base_type=target_base_type, + junctions=junctions, + target_type=target_type, + provisioning_state=provisioning_state, + nfs3=nfs3, + clfs=clfs, + unknown=unknown, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('StorageTarget', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs) + elif polling is False: polling_method = AsyncNoPolling() + else: polling_method = polling + return await async_poller(self._client, raw_result, get_long_running_output, polling_method) + create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}'} # type: ignore diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations_async/_usage_model_operations_async.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations_async/_usage_model_operations_async.py new file mode 100644 index 00000000000..309a2669eb9 --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations_async/_usage_model_operations_async.py @@ -0,0 +1,105 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar +import warnings + +from azure.core.async_paging import AsyncItemPaged, AsyncList +from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest +from azure.mgmt.core.exceptions import ARMErrorFormat + +from ... import models + +T = TypeVar('T') +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + +class UsageModelOperations: + """UsageModelOperations async operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~storage_cache_management_client.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer) -> None: + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def list( + self, + **kwargs + ) -> AsyncIterable["models.UsageModelsResult"]: + """Get the list of Cache Usage Models available to this subscription. + + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either UsageModelsResult or the result of cls(response) + :rtype: ~azure.core.async_paging.AsyncItemPaged[~storage_cache_management_client.models.UsageModelsResult] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.UsageModelsResult"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + + def prepare_request(next_link=None): + if not next_link: + # Construct URL + url = self.list.metadata['url'] # type: ignore + path_format_arguments = { + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + else: + url = next_link + query_parameters = {} # type: Dict[str, Any] + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + return request + + async def extract_data(pipeline_response): + deserialized = self._deserialize('UsageModelsResult', pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) + return deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + request = prepare_request(next_link) + + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = 
pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged( + get_next, extract_data + ) + list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.StorageCache/usageModels'} # type: ignore diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/__init__.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/__init__.py new file mode 100644 index 00000000000..00d08240cf4 --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/__init__.py @@ -0,0 +1,130 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +try: + from ._models_py3 import ApiOperation + from ._models_py3 import ApiOperationDisplay + from ._models_py3 import ApiOperationListResult + from ._models_py3 import AscOperation + from ._models_py3 import Cache + from ._models_py3 import CacheEncryptionSettings + from ._models_py3 import CacheHealth + from ._models_py3 import CacheNetworkSettings + from ._models_py3 import CacheSecuritySettings + from ._models_py3 import CacheUpgradeStatus + from ._models_py3 import CachesListResult + from ._models_py3 import ClfsTarget + from ._models_py3 import ClfsTargetProperties + from ._models_py3 import CloudErrorBody + from ._models_py3 import ErrorResponse + from ._models_py3 import KeyVaultKeyReference + from ._models_py3 import KeyVaultKeyReferenceSourceVault + from ._models_py3 import NamespaceJunction + from ._models_py3 import Nfs3Target + from ._models_py3 import Nfs3TargetProperties + from ._models_py3 import ResourceSku + from ._models_py3 import ResourceSkuCapabilities + from ._models_py3 import ResourceSkuLocationInfo + from ._models_py3 import ResourceSkusResult + from ._models_py3 import Restriction + from ._models_py3 import StorageTarget + from ._models_py3 import StorageTargetProperties + from ._models_py3 import StorageTargetResource + from ._models_py3 import StorageTargetsResult + from ._models_py3 import UnknownTarget + from ._models_py3 import UnknownTargetProperties + from ._models_py3 import UsageModel + from ._models_py3 import UsageModelDisplay + from ._models_py3 import UsageModelsResult +except (SyntaxError, ImportError): + from ._models import ApiOperation # type: ignore + from ._models import ApiOperationDisplay # type: ignore + from ._models import ApiOperationListResult # type: ignore + from ._models import AscOperation # type: ignore + from ._models import Cache # type: ignore + from ._models import CacheEncryptionSettings # type: ignore + from ._models import CacheHealth # type: ignore + from ._models import CacheNetworkSettings # type: ignore + from ._models import CacheSecuritySettings # type: ignore + from ._models import CacheUpgradeStatus # type: ignore + from ._models import CachesListResult # type: ignore + from ._models import ClfsTarget # type: ignore + from ._models import ClfsTargetProperties # type: ignore 
+ from ._models import CloudErrorBody # type: ignore + from ._models import ErrorResponse # type: ignore + from ._models import KeyVaultKeyReference # type: ignore + from ._models import KeyVaultKeyReferenceSourceVault # type: ignore + from ._models import NamespaceJunction # type: ignore + from ._models import Nfs3Target # type: ignore + from ._models import Nfs3TargetProperties # type: ignore + from ._models import ResourceSku # type: ignore + from ._models import ResourceSkuCapabilities # type: ignore + from ._models import ResourceSkuLocationInfo # type: ignore + from ._models import ResourceSkusResult # type: ignore + from ._models import Restriction # type: ignore + from ._models import StorageTarget # type: ignore + from ._models import StorageTargetProperties # type: ignore + from ._models import StorageTargetResource # type: ignore + from ._models import StorageTargetsResult # type: ignore + from ._models import UnknownTarget # type: ignore + from ._models import UnknownTargetProperties # type: ignore + from ._models import UsageModel # type: ignore + from ._models import UsageModelDisplay # type: ignore + from ._models import UsageModelsResult # type: ignore + +from ._storage_cache_management_client_enums import ( + CacheIdentityType, + FirmwareStatusType, + HealthStateType, + ProvisioningStateType, + ReasonCode, + StorageTargetType, +) + +__all__ = [ + 'ApiOperation', + 'ApiOperationDisplay', + 'ApiOperationListResult', + 'AscOperation', + 'Cache', + 'CacheEncryptionSettings', + 'CacheHealth', + 'CacheNetworkSettings', + 'CacheSecuritySettings', + 'CacheUpgradeStatus', + 'CachesListResult', + 'ClfsTarget', + 'ClfsTargetProperties', + 'CloudErrorBody', + 'ErrorResponse', + 'KeyVaultKeyReference', + 'KeyVaultKeyReferenceSourceVault', + 'NamespaceJunction', + 'Nfs3Target', + 'Nfs3TargetProperties', + 'ResourceSku', + 'ResourceSkuCapabilities', + 'ResourceSkuLocationInfo', + 'ResourceSkusResult', + 'Restriction', + 'StorageTarget', + 'StorageTargetProperties', + 'StorageTargetResource', + 'StorageTargetsResult', + 'UnknownTarget', + 'UnknownTargetProperties', + 'UsageModel', + 'UsageModelDisplay', + 'UsageModelsResult', + 'CacheIdentityType', + 'FirmwareStatusType', + 'HealthStateType', + 'ProvisioningStateType', + 'ReasonCode', + 'StorageTargetType', +] diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/_models.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/_models.py new file mode 100644 index 00000000000..ef3f9daa772 --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/_models.py @@ -0,0 +1,1141 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +import msrest.serialization + + +class ApiOperation(msrest.serialization.Model): + """REST API operation description: see https://github.com/Azure/azure-rest-api-specs/blob/master/documentation/openapi-authoring-automated-guidelines.md#r3023-operationsapiimplementation. + + :param display: The object that represents the operation. 
+ :type display: ~storage_cache_management_client.models.ApiOperationDisplay + :param name: Operation name: {provider}/{resource}/{operation}. + :type name: str + """ + + _attribute_map = { + 'display': {'key': 'display', 'type': 'ApiOperationDisplay'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(ApiOperation, self).__init__(**kwargs) + self.display = kwargs.get('display', None) + self.name = kwargs.get('name', None) + + +class ApiOperationDisplay(msrest.serialization.Model): + """The object that represents the operation. + + :param operation: Operation type: Read, write, delete, etc. + :type operation: str + :param provider: Service provider: Microsoft.StorageCache. + :type provider: str + :param resource: Resource on which the operation is performed: Cache, etc. + :type resource: str + """ + + _attribute_map = { + 'operation': {'key': 'operation', 'type': 'str'}, + 'provider': {'key': 'provider', 'type': 'str'}, + 'resource': {'key': 'resource', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(ApiOperationDisplay, self).__init__(**kwargs) + self.operation = kwargs.get('operation', None) + self.provider = kwargs.get('provider', None) + self.resource = kwargs.get('resource', None) + + +class ApiOperationListResult(msrest.serialization.Model): + """Result of the request to list Resource Provider operations. It contains a list of operations and a URL link to get the next set of results. + + :param next_link: URL to get the next set of operation list results if there are any. + :type next_link: str + :param value: List of Resource Provider operations supported by the Microsoft.StorageCache + resource provider. + :type value: list[~storage_cache_management_client.models.ApiOperation] + """ + + _attribute_map = { + 'next_link': {'key': 'nextLink', 'type': 'str'}, + 'value': {'key': 'value', 'type': '[ApiOperation]'}, + } + + def __init__( + self, + **kwargs + ): + super(ApiOperationListResult, self).__init__(**kwargs) + self.next_link = kwargs.get('next_link', None) + self.value = kwargs.get('value', None) + + +class AscOperation(msrest.serialization.Model): + """The status of operation. + + :param id: The operation Id. + :type id: str + :param name: The operation name. + :type name: str + :param start_time: The start time of the operation. + :type start_time: str + :param end_time: The end time of the operation. + :type end_time: str + :param status: The status of the operation. + :type status: str + :param error: The error detail of the operation if any. + :type error: ~storage_cache_management_client.models.ErrorResponse + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'str'}, + 'end_time': {'key': 'endTime', 'type': 'str'}, + 'status': {'key': 'status', 'type': 'str'}, + 'error': {'key': 'error', 'type': 'ErrorResponse'}, + } + + def __init__( + self, + **kwargs + ): + super(AscOperation, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.name = kwargs.get('name', None) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) + self.status = kwargs.get('status', None) + self.error = kwargs.get('error', None) + + +class Cache(msrest.serialization.Model): + """A Cache instance. Follows Azure Resource Manager standards: https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/resource-api-reference.md. 
+ + Variables are only populated by the server, and will be ignored when sending a request. + + :param tags: A set of tags. ARM tags as name/value pairs. + :type tags: object + :ivar id: Resource ID of the Cache. + :vartype id: str + :param location: Region name string. + :type location: str + :ivar name: Name of Cache. + :vartype name: str + :ivar type: Type of the Cache; Microsoft.StorageCache/Cache. + :vartype type: str + :param name_sku_name: SKU name for this Cache. + :type name_sku_name: str + :param cache_size_gb: The size of this Cache, in GB. + :type cache_size_gb: int + :ivar health: Health of the Cache. + :vartype health: ~storage_cache_management_client.models.CacheHealth + :ivar mount_addresses: Array of IP addresses that can be used by clients mounting this Cache. + :vartype mount_addresses: list[str] + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. Possible values include: + "Succeeded", "Failed", "Cancelled", "Creating", "Deleting", "Updating". + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param subnet: Subnet used for the Cache. + :type subnet: str + :param upgrade_status: Upgrade status of the Cache. + :type upgrade_status: ~storage_cache_management_client.models.CacheUpgradeStatus + :param network_settings: Specifies network settings of the cache. + :type network_settings: ~storage_cache_management_client.models.CacheNetworkSettings + :param encryption_settings: Specifies encryption settings of the cache. + :type encryption_settings: ~storage_cache_management_client.models.CacheEncryptionSettings + :param security_settings: Specifies security settings of the cache. + :type security_settings: ~storage_cache_management_client.models.CacheSecuritySettings + :ivar principal_id: The principal id of the cache. + :vartype principal_id: str + :ivar tenant_id: The tenant id associated with the cache. + :vartype tenant_id: str + :param type_identity_type: The type of identity used for the cache. Possible values include: + "SystemAssigned", "None". 
+ :type type_identity_type: str or ~storage_cache_management_client.models.CacheIdentityType + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True, 'pattern': r'^[-0-9a-zA-Z_]{1,80}$'}, + 'type': {'readonly': True}, + 'health': {'readonly': True}, + 'mount_addresses': {'readonly': True}, + 'principal_id': {'readonly': True}, + 'tenant_id': {'readonly': True}, + } + + _attribute_map = { + 'tags': {'key': 'tags', 'type': 'object'}, + 'id': {'key': 'id', 'type': 'str'}, + 'location': {'key': 'location', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'name_sku_name': {'key': 'sku.name', 'type': 'str'}, + 'cache_size_gb': {'key': 'properties.cacheSizeGB', 'type': 'int'}, + 'health': {'key': 'properties.health', 'type': 'CacheHealth'}, + 'mount_addresses': {'key': 'properties.mountAddresses', 'type': '[str]'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, + 'subnet': {'key': 'properties.subnet', 'type': 'str'}, + 'upgrade_status': {'key': 'properties.upgradeStatus', 'type': 'CacheUpgradeStatus'}, + 'network_settings': {'key': 'properties.networkSettings', 'type': 'CacheNetworkSettings'}, + 'encryption_settings': {'key': 'properties.encryptionSettings', 'type': 'CacheEncryptionSettings'}, + 'security_settings': {'key': 'properties.securitySettings', 'type': 'CacheSecuritySettings'}, + 'principal_id': {'key': 'identity.principalId', 'type': 'str'}, + 'tenant_id': {'key': 'identity.tenantId', 'type': 'str'}, + 'type_identity_type': {'key': 'identity.type', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(Cache, self).__init__(**kwargs) + self.tags = kwargs.get('tags', None) + self.id = None + self.location = kwargs.get('location', None) + self.name = None + self.type = None + self.name_sku_name = kwargs.get('name_sku_name', None) + self.cache_size_gb = kwargs.get('cache_size_gb', None) + self.health = None + self.mount_addresses = None + self.provisioning_state = kwargs.get('provisioning_state', None) + self.subnet = kwargs.get('subnet', None) + self.upgrade_status = kwargs.get('upgrade_status', None) + self.network_settings = kwargs.get('network_settings', None) + self.encryption_settings = kwargs.get('encryption_settings', None) + self.security_settings = kwargs.get('security_settings', None) + self.principal_id = None + self.tenant_id = None + self.type_identity_type = kwargs.get('type_identity_type', None) + + +class CacheEncryptionSettings(msrest.serialization.Model): + """Cache encryption settings. + + :param key_encryption_key: Specifies the location of the key encryption key in Key Vault. + :type key_encryption_key: ~storage_cache_management_client.models.KeyVaultKeyReference + """ + + _attribute_map = { + 'key_encryption_key': {'key': 'keyEncryptionKey', 'type': 'KeyVaultKeyReference'}, + } + + def __init__( + self, + **kwargs + ): + super(CacheEncryptionSettings, self).__init__(**kwargs) + self.key_encryption_key = kwargs.get('key_encryption_key', None) + + +class CacheHealth(msrest.serialization.Model): + """An indication of Cache health. Gives more information about health than just that related to provisioning. + + :param state: List of Cache health states. Possible values include: "Unknown", "Healthy", + "Degraded", "Down", "Transitioning", "Stopping", "Stopped", "Upgrading", "Flushing". + :type state: str or ~storage_cache_management_client.models.HealthStateType + :param status_description: Describes explanation of state. 
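# A minimal illustrative sketch (not from the generated sources): the dotted
# keys in Cache._attribute_map above ('sku.name', 'properties.cacheSizeGB',
# 'identity.principalId', ...) tell msrest how to nest the flat model fields
# on the wire. Assuming the vendored msrest behaves like the public msrest
# package, serialize() rebuilds the nested ARM payload; values are made up.
from azext_storagecache.vendored_sdks.storagecache.models import Cache

cache = Cache(location="eastus", name_sku_name="Standard_2G", cache_size_gb=3072)
body = cache.serialize()
# body == {'location': 'eastus',
#          'sku': {'name': 'Standard_2G'},
#          'properties': {'cacheSizeGB': 3072}}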
+ :type status_description: str + """ + + _attribute_map = { + 'state': {'key': 'state', 'type': 'str'}, + 'status_description': {'key': 'statusDescription', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(CacheHealth, self).__init__(**kwargs) + self.state = kwargs.get('state', None) + self.status_description = kwargs.get('status_description', None) + + +class CacheNetworkSettings(msrest.serialization.Model): + """Cache network settings. + + Variables are only populated by the server, and will be ignored when sending a request. + + :param mtu: The IPv4 maximum transmission unit configured for the subnet. + :type mtu: int + :ivar utility_addresses: Array of additional IP addresses used by this Cache. + :vartype utility_addresses: list[str] + """ + + _validation = { + 'mtu': {'maximum': 1500, 'minimum': 576}, + 'utility_addresses': {'readonly': True}, + } + + _attribute_map = { + 'mtu': {'key': 'mtu', 'type': 'int'}, + 'utility_addresses': {'key': 'utilityAddresses', 'type': '[str]'}, + } + + def __init__( + self, + **kwargs + ): + super(CacheNetworkSettings, self).__init__(**kwargs) + self.mtu = kwargs.get('mtu', 1500) + self.utility_addresses = None + + +class CacheSecuritySettings(msrest.serialization.Model): + """Cache security settings. + + :param root_squash: root squash of cache property. + :type root_squash: bool + """ + + _attribute_map = { + 'root_squash': {'key': 'rootSquash', 'type': 'bool'}, + } + + def __init__( + self, + **kwargs + ): + super(CacheSecuritySettings, self).__init__(**kwargs) + self.root_squash = kwargs.get('root_squash', None) + + +class CachesListResult(msrest.serialization.Model): + """Result of the request to list Caches. It contains a list of Caches and a URL link to get the next set of results. + + :param next_link: URL to get the next set of Cache list results, if there are any. + :type next_link: str + :param value: List of Caches. + :type value: list[~storage_cache_management_client.models.Cache] + """ + + _attribute_map = { + 'next_link': {'key': 'nextLink', 'type': 'str'}, + 'value': {'key': 'value', 'type': '[Cache]'}, + } + + def __init__( + self, + **kwargs + ): + super(CachesListResult, self).__init__(**kwargs) + self.next_link = kwargs.get('next_link', None) + self.value = kwargs.get('value', None) + + +class CacheUpgradeStatus(msrest.serialization.Model): + """Properties describing the software upgrade state of the Cache. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar current_firmware_version: Version string of the firmware currently installed on this + Cache. + :vartype current_firmware_version: str + :ivar firmware_update_status: True if there is a firmware update ready to install on this + Cache. The firmware will automatically be installed after firmwareUpdateDeadline if not + triggered earlier via the upgrade operation. Possible values include: "available", + "unavailable". + :vartype firmware_update_status: str or + ~storage_cache_management_client.models.FirmwareStatusType + :ivar firmware_update_deadline: Time at which the pending firmware update will automatically be + installed on the Cache. + :vartype firmware_update_deadline: ~datetime.datetime + :ivar last_firmware_update: Time of the last successful firmware update. + :vartype last_firmware_update: ~datetime.datetime + :ivar pending_firmware_version: When firmwareUpdateAvailable is true, this field holds the + version string for the update. 
+ :vartype pending_firmware_version: str + """ + + _validation = { + 'current_firmware_version': {'readonly': True}, + 'firmware_update_status': {'readonly': True}, + 'firmware_update_deadline': {'readonly': True}, + 'last_firmware_update': {'readonly': True}, + 'pending_firmware_version': {'readonly': True}, + } + + _attribute_map = { + 'current_firmware_version': {'key': 'currentFirmwareVersion', 'type': 'str'}, + 'firmware_update_status': {'key': 'firmwareUpdateStatus', 'type': 'str'}, + 'firmware_update_deadline': {'key': 'firmwareUpdateDeadline', 'type': 'iso-8601'}, + 'last_firmware_update': {'key': 'lastFirmwareUpdate', 'type': 'iso-8601'}, + 'pending_firmware_version': {'key': 'pendingFirmwareVersion', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(CacheUpgradeStatus, self).__init__(**kwargs) + self.current_firmware_version = None + self.firmware_update_status = None + self.firmware_update_deadline = None + self.last_firmware_update = None + self.pending_firmware_version = None + + +class ClfsTarget(msrest.serialization.Model): + """Properties pertained to ClfsTarget. + + :param target: Resource ID of storage container. + :type target: str + """ + + _attribute_map = { + 'target': {'key': 'target', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(ClfsTarget, self).__init__(**kwargs) + self.target = kwargs.get('target', None) + + +class StorageTargetProperties(msrest.serialization.Model): + """Properties of the Storage Target. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: ClfsTargetProperties, Nfs3TargetProperties, UnknownTargetProperties. + + All required parameters must be populated in order to send to Azure. + + :param target_base_type: Required. Type of the Storage Target.Constant filled by server. + Possible values include: "nfs3", "clfs", "unknown". + :type target_base_type: str or ~storage_cache_management_client.models.StorageTargetType + :param junctions: List of Cache namespace junctions to target for namespace associations. + :type junctions: list[~storage_cache_management_client.models.NamespaceJunction] + :param target_type: Type of the Storage Target. + :type target_type: str + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. Possible values include: + "Succeeded", "Failed", "Cancelled", "Creating", "Deleting", "Updating". + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param nfs3: Properties when targetType is nfs3. + :type nfs3: ~storage_cache_management_client.models.Nfs3Target + :param clfs: Properties when targetType is clfs. + :type clfs: ~storage_cache_management_client.models.ClfsTarget + :param unknown: Properties when targetType is unknown. 
+ :type unknown: ~storage_cache_management_client.models.UnknownTarget + """ + + _validation = { + 'target_base_type': {'required': True}, + } + + _attribute_map = { + 'target_base_type': {'key': 'targetBaseType', 'type': 'str'}, + 'junctions': {'key': 'junctions', 'type': '[NamespaceJunction]'}, + 'target_type': {'key': 'targetType', 'type': 'str'}, + 'provisioning_state': {'key': 'provisioningState', 'type': 'str'}, + 'nfs3': {'key': 'nfs3', 'type': 'Nfs3Target'}, + 'clfs': {'key': 'clfs', 'type': 'ClfsTarget'}, + 'unknown': {'key': 'unknown', 'type': 'UnknownTarget'}, + } + + _subtype_map = { + 'target_base_type': {'clfs': 'ClfsTargetProperties', 'nfs3': 'Nfs3TargetProperties', 'unknown': 'UnknownTargetProperties'} + } + + def __init__( + self, + **kwargs + ): + super(StorageTargetProperties, self).__init__(**kwargs) + self.target_base_type = None + self.junctions = kwargs.get('junctions', None) + self.target_type = kwargs.get('target_type', None) + self.provisioning_state = kwargs.get('provisioning_state', None) + self.nfs3 = kwargs.get('nfs3', None) + self.clfs = kwargs.get('clfs', None) + self.unknown = kwargs.get('unknown', None) + + +class ClfsTargetProperties(StorageTargetProperties): + """Storage container for use as a CLFS Storage Target. + + All required parameters must be populated in order to send to Azure. + + :param target_base_type: Required. Type of the Storage Target.Constant filled by server. + Possible values include: "nfs3", "clfs", "unknown". + :type target_base_type: str or ~storage_cache_management_client.models.StorageTargetType + :param junctions: List of Cache namespace junctions to target for namespace associations. + :type junctions: list[~storage_cache_management_client.models.NamespaceJunction] + :param target_type: Type of the Storage Target. + :type target_type: str + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. Possible values include: + "Succeeded", "Failed", "Cancelled", "Creating", "Deleting", "Updating". + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param nfs3: Properties when targetType is nfs3. + :type nfs3: ~storage_cache_management_client.models.Nfs3Target + :param clfs: Properties when targetType is clfs. + :type clfs: ~storage_cache_management_client.models.ClfsTarget + :param unknown: Properties when targetType is unknown. + :type unknown: ~storage_cache_management_client.models.UnknownTarget + """ + + _validation = { + 'target_base_type': {'required': True}, + } + + _attribute_map = { + 'target_base_type': {'key': 'targetBaseType', 'type': 'str'}, + 'junctions': {'key': 'junctions', 'type': '[NamespaceJunction]'}, + 'target_type': {'key': 'targetType', 'type': 'str'}, + 'provisioning_state': {'key': 'provisioningState', 'type': 'str'}, + 'nfs3': {'key': 'nfs3', 'type': 'Nfs3Target'}, + 'clfs': {'key': 'clfs', 'type': 'ClfsTarget'}, + 'unknown': {'key': 'unknown', 'type': 'UnknownTarget'}, + } + + def __init__( + self, + **kwargs + ): + super(ClfsTargetProperties, self).__init__(**kwargs) + self.target_base_type = 'clfs' + + +class CloudErrorBody(msrest.serialization.Model): + """An error response. + + :param code: An identifier for the error. Codes are invariant and are intended to be consumed + programmatically. + :type code: str + :param details: A list of additional details about the error. 
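# A minimal illustrative sketch (not from the generated sources): the
# _subtype_map on StorageTargetProperties above drives polymorphic
# deserialization; msrest picks the subclass from the wire value of
# targetBaseType. Assuming the vendored msrest matches the public package:
from azext_storagecache.vendored_sdks.storagecache.models import (
    ClfsTargetProperties,
    StorageTargetProperties,
)

props = StorageTargetProperties.deserialize({
    "targetBaseType": "clfs",
    "clfs": {"target": "example-container-resource-id"},  # illustrative value
})
assert isinstance(props, ClfsTargetProperties)
assert props.clfs.target == "example-container-resource-id"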
+ :type details: list[~storage_cache_management_client.models.CloudErrorBody] + :param message: A message describing the error, intended to be suitable for display in a user + interface. + :type message: str + :param target: The target of the particular error. For example, the name of the property in + error. + :type target: str + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'details': {'key': 'details', 'type': '[CloudErrorBody]'}, + 'message': {'key': 'message', 'type': 'str'}, + 'target': {'key': 'target', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(CloudErrorBody, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.details = kwargs.get('details', None) + self.message = kwargs.get('message', None) + self.target = kwargs.get('target', None) + + +class ErrorResponse(msrest.serialization.Model): + """Describes the format of Error response. + + :param code: Error code. + :type code: str + :param message: Error message indicating why the operation failed. + :type message: str + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(ErrorResponse, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + + +class KeyVaultKeyReference(msrest.serialization.Model): + """Describes a reference to Key Vault Key. + + All required parameters must be populated in order to send to Azure. + + :param key_url: Required. The URL referencing a key encryption key in Key Vault. + :type key_url: str + :param source_vault: Required. Describes a resource Id to source Key Vault. + :type source_vault: ~storage_cache_management_client.models.KeyVaultKeyReferenceSourceVault + """ + + _validation = { + 'key_url': {'required': True}, + 'source_vault': {'required': True}, + } + + _attribute_map = { + 'key_url': {'key': 'keyUrl', 'type': 'str'}, + 'source_vault': {'key': 'sourceVault', 'type': 'KeyVaultKeyReferenceSourceVault'}, + } + + def __init__( + self, + **kwargs + ): + super(KeyVaultKeyReference, self).__init__(**kwargs) + self.key_url = kwargs['key_url'] + self.source_vault = kwargs['source_vault'] + + +class KeyVaultKeyReferenceSourceVault(msrest.serialization.Model): + """Describes a resource Id to source Key Vault. + + :param id: Resource Id. + :type id: str + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(KeyVaultKeyReferenceSourceVault, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + + +class NamespaceJunction(msrest.serialization.Model): + """A namespace junction. + + :param namespace_path: Namespace path on a Cache for a Storage Target. + :type namespace_path: str + :param target_path: Path in Storage Target to which namespacePath points. + :type target_path: str + :param nfs_export: NFS export where targetPath exists. + :type nfs_export: str + """ + + _attribute_map = { + 'namespace_path': {'key': 'namespacePath', 'type': 'str'}, + 'target_path': {'key': 'targetPath', 'type': 'str'}, + 'nfs_export': {'key': 'nfsExport', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(NamespaceJunction, self).__init__(**kwargs) + self.namespace_path = kwargs.get('namespace_path', None) + self.target_path = kwargs.get('target_path', None) + self.nfs_export = kwargs.get('nfs_export', None) + + +class Nfs3Target(msrest.serialization.Model): + """Properties pertained to Nfs3Target. 
+ + :param target: IP address or host name of an NFSv3 host (e.g., 10.0.44.44). + :type target: str + :param usage_model: Identifies the primary usage model to be used for this Storage Target. Get + choices from .../usageModels. + :type usage_model: str + """ + + _validation = { + 'target': {'pattern': r'^[-.0-9a-zA-Z]+$'}, + } + + _attribute_map = { + 'target': {'key': 'target', 'type': 'str'}, + 'usage_model': {'key': 'usageModel', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(Nfs3Target, self).__init__(**kwargs) + self.target = kwargs.get('target', None) + self.usage_model = kwargs.get('usage_model', None) + + +class Nfs3TargetProperties(StorageTargetProperties): + """An NFSv3 mount point for use as a Storage Target. + + All required parameters must be populated in order to send to Azure. + + :param target_base_type: Required. Type of the Storage Target.Constant filled by server. + Possible values include: "nfs3", "clfs", "unknown". + :type target_base_type: str or ~storage_cache_management_client.models.StorageTargetType + :param junctions: List of Cache namespace junctions to target for namespace associations. + :type junctions: list[~storage_cache_management_client.models.NamespaceJunction] + :param target_type: Type of the Storage Target. + :type target_type: str + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. Possible values include: + "Succeeded", "Failed", "Cancelled", "Creating", "Deleting", "Updating". + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param nfs3: Properties when targetType is nfs3. + :type nfs3: ~storage_cache_management_client.models.Nfs3Target + :param clfs: Properties when targetType is clfs. + :type clfs: ~storage_cache_management_client.models.ClfsTarget + :param unknown: Properties when targetType is unknown. + :type unknown: ~storage_cache_management_client.models.UnknownTarget + """ + + _validation = { + 'target_base_type': {'required': True}, + } + + _attribute_map = { + 'target_base_type': {'key': 'targetBaseType', 'type': 'str'}, + 'junctions': {'key': 'junctions', 'type': '[NamespaceJunction]'}, + 'target_type': {'key': 'targetType', 'type': 'str'}, + 'provisioning_state': {'key': 'provisioningState', 'type': 'str'}, + 'nfs3': {'key': 'nfs3', 'type': 'Nfs3Target'}, + 'clfs': {'key': 'clfs', 'type': 'ClfsTarget'}, + 'unknown': {'key': 'unknown', 'type': 'UnknownTarget'}, + } + + def __init__( + self, + **kwargs + ): + super(Nfs3TargetProperties, self).__init__(**kwargs) + self.target_base_type = 'nfs3' + + +class ResourceSku(msrest.serialization.Model): + """A resource SKU. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar resource_type: The type of resource the SKU applies to. + :vartype resource_type: str + :param capabilities: A list of capabilities of this SKU, such as throughput or ops/sec. + :type capabilities: list[~storage_cache_management_client.models.ResourceSkuCapabilities] + :ivar locations: The set of locations that the SKU is available. This will be supported and + registered Azure Geo Regions (e.g., West US, East US, Southeast Asia, etc.). + :vartype locations: list[str] + :param location_info: The set of locations that the SKU is available. + :type location_info: list[~storage_cache_management_client.models.ResourceSkuLocationInfo] + :param name: The name of this SKU. 
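# A minimal illustrative sketch (not from the generated sources): how a cache
# namespace junction and an NFSv3 target compose into Nfs3TargetProperties.
# The constructor above pins target_base_type to 'nfs3'; paths and the usage
# model name are made up for illustration.
from azext_storagecache.vendored_sdks.storagecache.models import (
    NamespaceJunction,
    Nfs3Target,
    Nfs3TargetProperties,
)

junction = NamespaceJunction(
    namespace_path="/nfs1",
    target_path="/exports/data",
    nfs_export="/exports/data",
)
nfs_props = Nfs3TargetProperties(
    junctions=[junction],
    nfs3=Nfs3Target(target="10.0.44.44", usage_model="WRITE_WORKLOAD_15"),
    target_type="nfs3",
)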
+ :type name: str + :param restrictions: The restrictions preventing this SKU from being used. This is empty if + there are no restrictions. + :type restrictions: list[~storage_cache_management_client.models.Restriction] + """ + + _validation = { + 'resource_type': {'readonly': True}, + 'locations': {'readonly': True}, + } + + _attribute_map = { + 'resource_type': {'key': 'resourceType', 'type': 'str'}, + 'capabilities': {'key': 'capabilities', 'type': '[ResourceSkuCapabilities]'}, + 'locations': {'key': 'locations', 'type': '[str]'}, + 'location_info': {'key': 'locationInfo', 'type': '[ResourceSkuLocationInfo]'}, + 'name': {'key': 'name', 'type': 'str'}, + 'restrictions': {'key': 'restrictions', 'type': '[Restriction]'}, + } + + def __init__( + self, + **kwargs + ): + super(ResourceSku, self).__init__(**kwargs) + self.resource_type = None + self.capabilities = kwargs.get('capabilities', None) + self.locations = None + self.location_info = kwargs.get('location_info', None) + self.name = kwargs.get('name', None) + self.restrictions = kwargs.get('restrictions', None) + + +class ResourceSkuCapabilities(msrest.serialization.Model): + """A resource SKU capability. + + :param name: Name of a capability, such as ops/sec. + :type name: str + :param value: Quantity, if the capability is measured by quantity. + :type value: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(ResourceSkuCapabilities, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.value = kwargs.get('value', None) + + +class ResourceSkuLocationInfo(msrest.serialization.Model): + """Resource SKU location information. + + :param location: Location where this SKU is available. + :type location: str + :param zones: Zones if any. + :type zones: list[str] + """ + + _attribute_map = { + 'location': {'key': 'location', 'type': 'str'}, + 'zones': {'key': 'zones', 'type': '[str]'}, + } + + def __init__( + self, + **kwargs + ): + super(ResourceSkuLocationInfo, self).__init__(**kwargs) + self.location = kwargs.get('location', None) + self.zones = kwargs.get('zones', None) + + +class ResourceSkusResult(msrest.serialization.Model): + """The response from the List Cache SKUs operation. + + Variables are only populated by the server, and will be ignored when sending a request. + + :param next_link: The URI to fetch the next page of Cache SKUs. + :type next_link: str + :ivar value: The list of SKUs available for the subscription. + :vartype value: list[~storage_cache_management_client.models.ResourceSku] + """ + + _validation = { + 'value': {'readonly': True}, + } + + _attribute_map = { + 'next_link': {'key': 'nextLink', 'type': 'str'}, + 'value': {'key': 'value', 'type': '[ResourceSku]'}, + } + + def __init__( + self, + **kwargs + ): + super(ResourceSkusResult, self).__init__(**kwargs) + self.next_link = kwargs.get('next_link', None) + self.value = None + + +class Restriction(msrest.serialization.Model): + """The restrictions preventing this SKU from being used. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar type: The type of restrictions. In this version, the only possible value for this is + location. + :vartype type: str + :ivar values: The value of restrictions. If the restriction type is set to location, then this + would be the different locations where the SKU is restricted. 
+ :vartype values: list[str] + :param reason_code: The reason for the restriction. As of now this can be "QuotaId" or + "NotAvailableForSubscription". "QuotaId" is set when the SKU has requiredQuotas parameter as + the subscription does not belong to that quota. "NotAvailableForSubscription" is related to + capacity at the datacenter. Possible values include: "QuotaId", "NotAvailableForSubscription". + :type reason_code: str or ~storage_cache_management_client.models.ReasonCode + """ + + _validation = { + 'type': {'readonly': True}, + 'values': {'readonly': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'str'}, + 'values': {'key': 'values', 'type': '[str]'}, + 'reason_code': {'key': 'reasonCode', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(Restriction, self).__init__(**kwargs) + self.type = None + self.values = None + self.reason_code = kwargs.get('reason_code', None) + + +class StorageTargetResource(msrest.serialization.Model): + """Resource used by a Cache. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar name: Name of the Storage Target. + :vartype name: str + :ivar id: Resource ID of the Storage Target. + :vartype id: str + :ivar type: Type of the Storage Target; Microsoft.StorageCache/Cache/StorageTarget. + :vartype type: str + """ + + _validation = { + 'name': {'readonly': True, 'pattern': r'^[-0-9a-zA-Z_]{1,80}$'}, + 'id': {'readonly': True}, + 'type': {'readonly': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'id': {'key': 'id', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(StorageTargetResource, self).__init__(**kwargs) + self.name = None + self.id = None + self.type = None + + +class StorageTarget(StorageTargetResource): + """Type of the Storage Target. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar name: Name of the Storage Target. + :vartype name: str + :ivar id: Resource ID of the Storage Target. + :vartype id: str + :ivar type: Type of the Storage Target; Microsoft.StorageCache/Cache/StorageTarget. + :vartype type: str + :param target_base_type: Type of the Storage Target.Constant filled by server. Possible values + include: "nfs3", "clfs", "unknown". + :type target_base_type: str or ~storage_cache_management_client.models.StorageTargetType + :param junctions: List of Cache namespace junctions to target for namespace associations. + :type junctions: list[~storage_cache_management_client.models.NamespaceJunction] + :param target_type: Type of the Storage Target. + :type target_type: str + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. Possible values include: + "Succeeded", "Failed", "Cancelled", "Creating", "Deleting", "Updating". + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param nfs3: Properties when targetType is nfs3. + :type nfs3: ~storage_cache_management_client.models.Nfs3Target + :param clfs: Properties when targetType is clfs. + :type clfs: ~storage_cache_management_client.models.ClfsTarget + :param unknown: Properties when targetType is unknown. 
+ :type unknown: ~storage_cache_management_client.models.UnknownTarget + """ + + _validation = { + 'name': {'readonly': True, 'pattern': r'^[-0-9a-zA-Z_]{1,80}$'}, + 'id': {'readonly': True}, + 'type': {'readonly': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'id': {'key': 'id', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'target_base_type': {'key': 'properties.targetBaseType', 'type': 'str'}, + 'junctions': {'key': 'properties.junctions', 'type': '[NamespaceJunction]'}, + 'target_type': {'key': 'properties.targetType', 'type': 'str'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, + 'nfs3': {'key': 'properties.nfs3', 'type': 'Nfs3Target'}, + 'clfs': {'key': 'properties.clfs', 'type': 'ClfsTarget'}, + 'unknown': {'key': 'properties.unknown', 'type': 'UnknownTarget'}, + } + + def __init__( + self, + **kwargs + ): + super(StorageTarget, self).__init__(**kwargs) + self.target_base_type = None + self.junctions = kwargs.get('junctions', None) + self.target_type = kwargs.get('target_type', None) + self.provisioning_state = kwargs.get('provisioning_state', None) + self.nfs3 = kwargs.get('nfs3', None) + self.clfs = kwargs.get('clfs', None) + self.unknown = kwargs.get('unknown', None) + + +class StorageTargetsResult(msrest.serialization.Model): + """A list of Storage Targets. + + :param next_link: The URI to fetch the next page of Storage Targets. + :type next_link: str + :param value: The list of Storage Targets defined for the Cache. + :type value: list[~storage_cache_management_client.models.StorageTarget] + """ + + _attribute_map = { + 'next_link': {'key': 'nextLink', 'type': 'str'}, + 'value': {'key': 'value', 'type': '[StorageTarget]'}, + } + + def __init__( + self, + **kwargs + ): + super(StorageTargetsResult, self).__init__(**kwargs) + self.next_link = kwargs.get('next_link', None) + self.value = kwargs.get('value', None) + + +class UnknownTarget(msrest.serialization.Model): + """Properties pertained to UnknownTarget. + + :param unknown_map: Dictionary of string->string pairs containing information about the Storage + Target. + :type unknown_map: dict[str, str] + """ + + _attribute_map = { + 'unknown_map': {'key': 'unknownMap', 'type': '{str}'}, + } + + def __init__( + self, + **kwargs + ): + super(UnknownTarget, self).__init__(**kwargs) + self.unknown_map = kwargs.get('unknown_map', None) + + +class UnknownTargetProperties(StorageTargetProperties): + """Storage container for use as an Unknown Storage Target. + + All required parameters must be populated in order to send to Azure. + + :param target_base_type: Required. Type of the Storage Target.Constant filled by server. + Possible values include: "nfs3", "clfs", "unknown". + :type target_base_type: str or ~storage_cache_management_client.models.StorageTargetType + :param junctions: List of Cache namespace junctions to target for namespace associations. + :type junctions: list[~storage_cache_management_client.models.NamespaceJunction] + :param target_type: Type of the Storage Target. + :type target_type: str + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. Possible values include: + "Succeeded", "Failed", "Cancelled", "Creating", "Deleting", "Updating". + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param nfs3: Properties when targetType is nfs3. 
+ :type nfs3: ~storage_cache_management_client.models.Nfs3Target + :param clfs: Properties when targetType is clfs. + :type clfs: ~storage_cache_management_client.models.ClfsTarget + :param unknown: Properties when targetType is unknown. + :type unknown: ~storage_cache_management_client.models.UnknownTarget + """ + + _validation = { + 'target_base_type': {'required': True}, + } + + _attribute_map = { + 'target_base_type': {'key': 'targetBaseType', 'type': 'str'}, + 'junctions': {'key': 'junctions', 'type': '[NamespaceJunction]'}, + 'target_type': {'key': 'targetType', 'type': 'str'}, + 'provisioning_state': {'key': 'provisioningState', 'type': 'str'}, + 'nfs3': {'key': 'nfs3', 'type': 'Nfs3Target'}, + 'clfs': {'key': 'clfs', 'type': 'ClfsTarget'}, + 'unknown': {'key': 'unknown', 'type': 'UnknownTarget'}, + } + + def __init__( + self, + **kwargs + ): + super(UnknownTargetProperties, self).__init__(**kwargs) + self.target_base_type = 'unknown' + + +class UsageModel(msrest.serialization.Model): + """A usage model. + + :param display: Localized information describing this usage model. + :type display: ~storage_cache_management_client.models.UsageModelDisplay + :param model_name: Non-localized keyword name for this usage model. + :type model_name: str + :param target_type: The type of Storage Target to which this model is applicable (only nfs3 as + of this version). + :type target_type: str + """ + + _attribute_map = { + 'display': {'key': 'display', 'type': 'UsageModelDisplay'}, + 'model_name': {'key': 'modelName', 'type': 'str'}, + 'target_type': {'key': 'targetType', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(UsageModel, self).__init__(**kwargs) + self.display = kwargs.get('display', None) + self.model_name = kwargs.get('model_name', None) + self.target_type = kwargs.get('target_type', None) + + +class UsageModelDisplay(msrest.serialization.Model): + """Localized information describing this usage model. + + :param description: String to display for this usage model. + :type description: str + """ + + _attribute_map = { + 'description': {'key': 'description', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(UsageModelDisplay, self).__init__(**kwargs) + self.description = kwargs.get('description', None) + + +class UsageModelsResult(msrest.serialization.Model): + """A list of Cache usage models. + + :param next_link: The URI to fetch the next page of Cache usage models. + :type next_link: str + :param value: The list of usage models available for the subscription. + :type value: list[~storage_cache_management_client.models.UsageModel] + """ + + _attribute_map = { + 'next_link': {'key': 'nextLink', 'type': 'str'}, + 'value': {'key': 'value', 'type': '[UsageModel]'}, + } + + def __init__( + self, + **kwargs + ): + super(UsageModelsResult, self).__init__(**kwargs) + self.next_link = kwargs.get('next_link', None) + self.value = kwargs.get('value', None) diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/_models_py3.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/_models_py3.py new file mode 100644 index 00000000000..6ff8190a920 --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/_models_py3.py @@ -0,0 +1,1272 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import Dict, List, Optional, Union + +import msrest.serialization + +from ._storage_cache_management_client_enums import * + + +class ApiOperation(msrest.serialization.Model): + """REST API operation description: see https://github.com/Azure/azure-rest-api-specs/blob/master/documentation/openapi-authoring-automated-guidelines.md#r3023-operationsapiimplementation. + + :param display: The object that represents the operation. + :type display: ~storage_cache_management_client.models.ApiOperationDisplay + :param name: Operation name: {provider}/{resource}/{operation}. + :type name: str + """ + + _attribute_map = { + 'display': {'key': 'display', 'type': 'ApiOperationDisplay'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + def __init__( + self, + *, + display: Optional["ApiOperationDisplay"] = None, + name: Optional[str] = None, + **kwargs + ): + super(ApiOperation, self).__init__(**kwargs) + self.display = display + self.name = name + + +class ApiOperationDisplay(msrest.serialization.Model): + """The object that represents the operation. + + :param operation: Operation type: Read, write, delete, etc. + :type operation: str + :param provider: Service provider: Microsoft.StorageCache. + :type provider: str + :param resource: Resource on which the operation is performed: Cache, etc. + :type resource: str + """ + + _attribute_map = { + 'operation': {'key': 'operation', 'type': 'str'}, + 'provider': {'key': 'provider', 'type': 'str'}, + 'resource': {'key': 'resource', 'type': 'str'}, + } + + def __init__( + self, + *, + operation: Optional[str] = None, + provider: Optional[str] = None, + resource: Optional[str] = None, + **kwargs + ): + super(ApiOperationDisplay, self).__init__(**kwargs) + self.operation = operation + self.provider = provider + self.resource = resource + + +class ApiOperationListResult(msrest.serialization.Model): + """Result of the request to list Resource Provider operations. It contains a list of operations and a URL link to get the next set of results. + + :param next_link: URL to get the next set of operation list results if there are any. + :type next_link: str + :param value: List of Resource Provider operations supported by the Microsoft.StorageCache + resource provider. + :type value: list[~storage_cache_management_client.models.ApiOperation] + """ + + _attribute_map = { + 'next_link': {'key': 'nextLink', 'type': 'str'}, + 'value': {'key': 'value', 'type': '[ApiOperation]'}, + } + + def __init__( + self, + *, + next_link: Optional[str] = None, + value: Optional[List["ApiOperation"]] = None, + **kwargs + ): + super(ApiOperationListResult, self).__init__(**kwargs) + self.next_link = next_link + self.value = value + + +class AscOperation(msrest.serialization.Model): + """The status of operation. + + :param id: The operation Id. + :type id: str + :param name: The operation name. + :type name: str + :param start_time: The start time of the operation. + :type start_time: str + :param end_time: The end time of the operation. + :type end_time: str + :param status: The status of the operation. + :type status: str + :param error: The error detail of the operation if any. 
+ :type error: ~storage_cache_management_client.models.ErrorResponse + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'str'}, + 'end_time': {'key': 'endTime', 'type': 'str'}, + 'status': {'key': 'status', 'type': 'str'}, + 'error': {'key': 'error', 'type': 'ErrorResponse'}, + } + + def __init__( + self, + *, + id: Optional[str] = None, + name: Optional[str] = None, + start_time: Optional[str] = None, + end_time: Optional[str] = None, + status: Optional[str] = None, + error: Optional["ErrorResponse"] = None, + **kwargs + ): + super(AscOperation, self).__init__(**kwargs) + self.id = id + self.name = name + self.start_time = start_time + self.end_time = end_time + self.status = status + self.error = error + + +class Cache(msrest.serialization.Model): + """A Cache instance. Follows Azure Resource Manager standards: https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/resource-api-reference.md. + + Variables are only populated by the server, and will be ignored when sending a request. + + :param tags: A set of tags. ARM tags as name/value pairs. + :type tags: object + :ivar id: Resource ID of the Cache. + :vartype id: str + :param location: Region name string. + :type location: str + :ivar name: Name of Cache. + :vartype name: str + :ivar type: Type of the Cache; Microsoft.StorageCache/Cache. + :vartype type: str + :param name_sku_name: SKU name for this Cache. + :type name_sku_name: str + :param cache_size_gb: The size of this Cache, in GB. + :type cache_size_gb: int + :ivar health: Health of the Cache. + :vartype health: ~storage_cache_management_client.models.CacheHealth + :ivar mount_addresses: Array of IP addresses that can be used by clients mounting this Cache. + :vartype mount_addresses: list[str] + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. Possible values include: + "Succeeded", "Failed", "Cancelled", "Creating", "Deleting", "Updating". + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param subnet: Subnet used for the Cache. + :type subnet: str + :param upgrade_status: Upgrade status of the Cache. + :type upgrade_status: ~storage_cache_management_client.models.CacheUpgradeStatus + :param network_settings: Specifies network settings of the cache. + :type network_settings: ~storage_cache_management_client.models.CacheNetworkSettings + :param encryption_settings: Specifies encryption settings of the cache. + :type encryption_settings: ~storage_cache_management_client.models.CacheEncryptionSettings + :param security_settings: Specifies security settings of the cache. + :type security_settings: ~storage_cache_management_client.models.CacheSecuritySettings + :ivar principal_id: The principal id of the cache. + :vartype principal_id: str + :ivar tenant_id: The tenant id associated with the cache. + :vartype tenant_id: str + :param type_identity_type: The type of identity used for the cache. Possible values include: + "SystemAssigned", "None". 
+ :type type_identity_type: str or ~storage_cache_management_client.models.CacheIdentityType + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True, 'pattern': r'^[-0-9a-zA-Z_]{1,80}$'}, + 'type': {'readonly': True}, + 'health': {'readonly': True}, + 'mount_addresses': {'readonly': True}, + 'principal_id': {'readonly': True}, + 'tenant_id': {'readonly': True}, + } + + _attribute_map = { + 'tags': {'key': 'tags', 'type': 'object'}, + 'id': {'key': 'id', 'type': 'str'}, + 'location': {'key': 'location', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'name_sku_name': {'key': 'sku.name', 'type': 'str'}, + 'cache_size_gb': {'key': 'properties.cacheSizeGB', 'type': 'int'}, + 'health': {'key': 'properties.health', 'type': 'CacheHealth'}, + 'mount_addresses': {'key': 'properties.mountAddresses', 'type': '[str]'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, + 'subnet': {'key': 'properties.subnet', 'type': 'str'}, + 'upgrade_status': {'key': 'properties.upgradeStatus', 'type': 'CacheUpgradeStatus'}, + 'network_settings': {'key': 'properties.networkSettings', 'type': 'CacheNetworkSettings'}, + 'encryption_settings': {'key': 'properties.encryptionSettings', 'type': 'CacheEncryptionSettings'}, + 'security_settings': {'key': 'properties.securitySettings', 'type': 'CacheSecuritySettings'}, + 'principal_id': {'key': 'identity.principalId', 'type': 'str'}, + 'tenant_id': {'key': 'identity.tenantId', 'type': 'str'}, + 'type_identity_type': {'key': 'identity.type', 'type': 'str'}, + } + + def __init__( + self, + *, + tags: Optional[object] = None, + location: Optional[str] = None, + name_sku_name: Optional[str] = None, + cache_size_gb: Optional[int] = None, + provisioning_state: Optional[Union[str, "ProvisioningStateType"]] = None, + subnet: Optional[str] = None, + upgrade_status: Optional["CacheUpgradeStatus"] = None, + network_settings: Optional["CacheNetworkSettings"] = None, + encryption_settings: Optional["CacheEncryptionSettings"] = None, + security_settings: Optional["CacheSecuritySettings"] = None, + type_identity_type: Optional[Union[str, "CacheIdentityType"]] = None, + **kwargs + ): + super(Cache, self).__init__(**kwargs) + self.tags = tags + self.id = None + self.location = location + self.name = None + self.type = None + self.name_sku_name = name_sku_name + self.cache_size_gb = cache_size_gb + self.health = None + self.mount_addresses = None + self.provisioning_state = provisioning_state + self.subnet = subnet + self.upgrade_status = upgrade_status + self.network_settings = network_settings + self.encryption_settings = encryption_settings + self.security_settings = security_settings + self.principal_id = None + self.tenant_id = None + self.type_identity_type = type_identity_type + + +class CacheEncryptionSettings(msrest.serialization.Model): + """Cache encryption settings. + + :param key_encryption_key: Specifies the location of the key encryption key in Key Vault. + :type key_encryption_key: ~storage_cache_management_client.models.KeyVaultKeyReference + """ + + _attribute_map = { + 'key_encryption_key': {'key': 'keyEncryptionKey', 'type': 'KeyVaultKeyReference'}, + } + + def __init__( + self, + *, + key_encryption_key: Optional["KeyVaultKeyReference"] = None, + **kwargs + ): + super(CacheEncryptionSettings, self).__init__(**kwargs) + self.key_encryption_key = key_encryption_key + + +class CacheHealth(msrest.serialization.Model): + """An indication of Cache health. 
Gives more information about health than just that related to provisioning. + + :param state: List of Cache health states. Possible values include: "Unknown", "Healthy", + "Degraded", "Down", "Transitioning", "Stopping", "Stopped", "Upgrading", "Flushing". + :type state: str or ~storage_cache_management_client.models.HealthStateType + :param status_description: Describes explanation of state. + :type status_description: str + """ + + _attribute_map = { + 'state': {'key': 'state', 'type': 'str'}, + 'status_description': {'key': 'statusDescription', 'type': 'str'}, + } + + def __init__( + self, + *, + state: Optional[Union[str, "HealthStateType"]] = None, + status_description: Optional[str] = None, + **kwargs + ): + super(CacheHealth, self).__init__(**kwargs) + self.state = state + self.status_description = status_description + + +class CacheNetworkSettings(msrest.serialization.Model): + """Cache network settings. + + Variables are only populated by the server, and will be ignored when sending a request. + + :param mtu: The IPv4 maximum transmission unit configured for the subnet. + :type mtu: int + :ivar utility_addresses: Array of additional IP addresses used by this Cache. + :vartype utility_addresses: list[str] + """ + + _validation = { + 'mtu': {'maximum': 1500, 'minimum': 576}, + 'utility_addresses': {'readonly': True}, + } + + _attribute_map = { + 'mtu': {'key': 'mtu', 'type': 'int'}, + 'utility_addresses': {'key': 'utilityAddresses', 'type': '[str]'}, + } + + def __init__( + self, + *, + mtu: Optional[int] = 1500, + **kwargs + ): + super(CacheNetworkSettings, self).__init__(**kwargs) + self.mtu = mtu + self.utility_addresses = None + + +class CacheSecuritySettings(msrest.serialization.Model): + """Cache security settings. + + :param root_squash: root squash of cache property. + :type root_squash: bool + """ + + _attribute_map = { + 'root_squash': {'key': 'rootSquash', 'type': 'bool'}, + } + + def __init__( + self, + *, + root_squash: Optional[bool] = None, + **kwargs + ): + super(CacheSecuritySettings, self).__init__(**kwargs) + self.root_squash = root_squash + + +class CachesListResult(msrest.serialization.Model): + """Result of the request to list Caches. It contains a list of Caches and a URL link to get the next set of results. + + :param next_link: URL to get the next set of Cache list results, if there are any. + :type next_link: str + :param value: List of Caches. + :type value: list[~storage_cache_management_client.models.Cache] + """ + + _attribute_map = { + 'next_link': {'key': 'nextLink', 'type': 'str'}, + 'value': {'key': 'value', 'type': '[Cache]'}, + } + + def __init__( + self, + *, + next_link: Optional[str] = None, + value: Optional[List["Cache"]] = None, + **kwargs + ): + super(CachesListResult, self).__init__(**kwargs) + self.next_link = next_link + self.value = value + + +class CacheUpgradeStatus(msrest.serialization.Model): + """Properties describing the software upgrade state of the Cache. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar current_firmware_version: Version string of the firmware currently installed on this + Cache. + :vartype current_firmware_version: str + :ivar firmware_update_status: True if there is a firmware update ready to install on this + Cache. The firmware will automatically be installed after firmwareUpdateDeadline if not + triggered earlier via the upgrade operation. Possible values include: "available", + "unavailable". 
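# A minimal illustrative sketch (not from the generated sources): the
# _models_py3 variants defined here expose the same models through
# keyword-only, type-annotated constructors, while the _models versions
# earlier in this diff accept **kwargs for Python 2. Either way the wire
# payload is identical; values are made up.
from azext_storagecache.vendored_sdks.storagecache.models import (
    CacheNetworkSettings,
    CacheSecuritySettings,
)

settings = CacheNetworkSettings(mtu=1400)
security = CacheSecuritySettings(root_squash=True)
# settings.serialize() == {'mtu': 1400}
# security.serialize() == {'rootSquash': True}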
+ :vartype firmware_update_status: str or + ~storage_cache_management_client.models.FirmwareStatusType + :ivar firmware_update_deadline: Time at which the pending firmware update will automatically be + installed on the Cache. + :vartype firmware_update_deadline: ~datetime.datetime + :ivar last_firmware_update: Time of the last successful firmware update. + :vartype last_firmware_update: ~datetime.datetime + :ivar pending_firmware_version: When firmwareUpdateAvailable is true, this field holds the + version string for the update. + :vartype pending_firmware_version: str + """ + + _validation = { + 'current_firmware_version': {'readonly': True}, + 'firmware_update_status': {'readonly': True}, + 'firmware_update_deadline': {'readonly': True}, + 'last_firmware_update': {'readonly': True}, + 'pending_firmware_version': {'readonly': True}, + } + + _attribute_map = { + 'current_firmware_version': {'key': 'currentFirmwareVersion', 'type': 'str'}, + 'firmware_update_status': {'key': 'firmwareUpdateStatus', 'type': 'str'}, + 'firmware_update_deadline': {'key': 'firmwareUpdateDeadline', 'type': 'iso-8601'}, + 'last_firmware_update': {'key': 'lastFirmwareUpdate', 'type': 'iso-8601'}, + 'pending_firmware_version': {'key': 'pendingFirmwareVersion', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(CacheUpgradeStatus, self).__init__(**kwargs) + self.current_firmware_version = None + self.firmware_update_status = None + self.firmware_update_deadline = None + self.last_firmware_update = None + self.pending_firmware_version = None + + +class ClfsTarget(msrest.serialization.Model): + """Properties pertained to ClfsTarget. + + :param target: Resource ID of storage container. + :type target: str + """ + + _attribute_map = { + 'target': {'key': 'target', 'type': 'str'}, + } + + def __init__( + self, + *, + target: Optional[str] = None, + **kwargs + ): + super(ClfsTarget, self).__init__(**kwargs) + self.target = target + + +class StorageTargetProperties(msrest.serialization.Model): + """Properties of the Storage Target. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: ClfsTargetProperties, Nfs3TargetProperties, UnknownTargetProperties. + + All required parameters must be populated in order to send to Azure. + + :param target_base_type: Required. Type of the Storage Target.Constant filled by server. + Possible values include: "nfs3", "clfs", "unknown". + :type target_base_type: str or ~storage_cache_management_client.models.StorageTargetType + :param junctions: List of Cache namespace junctions to target for namespace associations. + :type junctions: list[~storage_cache_management_client.models.NamespaceJunction] + :param target_type: Type of the Storage Target. + :type target_type: str + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. Possible values include: + "Succeeded", "Failed", "Cancelled", "Creating", "Deleting", "Updating". + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param nfs3: Properties when targetType is nfs3. + :type nfs3: ~storage_cache_management_client.models.Nfs3Target + :param clfs: Properties when targetType is clfs. + :type clfs: ~storage_cache_management_client.models.ClfsTarget + :param unknown: Properties when targetType is unknown. 
+ :type unknown: ~storage_cache_management_client.models.UnknownTarget + """ + + _validation = { + 'target_base_type': {'required': True}, + } + + _attribute_map = { + 'target_base_type': {'key': 'targetBaseType', 'type': 'str'}, + 'junctions': {'key': 'junctions', 'type': '[NamespaceJunction]'}, + 'target_type': {'key': 'targetType', 'type': 'str'}, + 'provisioning_state': {'key': 'provisioningState', 'type': 'str'}, + 'nfs3': {'key': 'nfs3', 'type': 'Nfs3Target'}, + 'clfs': {'key': 'clfs', 'type': 'ClfsTarget'}, + 'unknown': {'key': 'unknown', 'type': 'UnknownTarget'}, + } + + _subtype_map = { + 'target_base_type': {'clfs': 'ClfsTargetProperties', 'nfs3': 'Nfs3TargetProperties', 'unknown': 'UnknownTargetProperties'} + } + + def __init__( + self, + *, + junctions: Optional[List["NamespaceJunction"]] = None, + target_type: Optional[str] = None, + provisioning_state: Optional[Union[str, "ProvisioningStateType"]] = None, + nfs3: Optional["Nfs3Target"] = None, + clfs: Optional["ClfsTarget"] = None, + unknown: Optional["UnknownTarget"] = None, + **kwargs + ): + super(StorageTargetProperties, self).__init__(**kwargs) + self.target_base_type: Optional[str] = None + self.junctions = junctions + self.target_type = target_type + self.provisioning_state = provisioning_state + self.nfs3 = nfs3 + self.clfs = clfs + self.unknown = unknown + + +class ClfsTargetProperties(StorageTargetProperties): + """Storage container for use as a CLFS Storage Target. + + All required parameters must be populated in order to send to Azure. + + :param target_base_type: Required. Type of the Storage Target.Constant filled by server. + Possible values include: "nfs3", "clfs", "unknown". + :type target_base_type: str or ~storage_cache_management_client.models.StorageTargetType + :param junctions: List of Cache namespace junctions to target for namespace associations. + :type junctions: list[~storage_cache_management_client.models.NamespaceJunction] + :param target_type: Type of the Storage Target. + :type target_type: str + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. Possible values include: + "Succeeded", "Failed", "Cancelled", "Creating", "Deleting", "Updating". + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param nfs3: Properties when targetType is nfs3. + :type nfs3: ~storage_cache_management_client.models.Nfs3Target + :param clfs: Properties when targetType is clfs. + :type clfs: ~storage_cache_management_client.models.ClfsTarget + :param unknown: Properties when targetType is unknown. 
+ :type unknown: ~storage_cache_management_client.models.UnknownTarget + """ + + _validation = { + 'target_base_type': {'required': True}, + } + + _attribute_map = { + 'target_base_type': {'key': 'targetBaseType', 'type': 'str'}, + 'junctions': {'key': 'junctions', 'type': '[NamespaceJunction]'}, + 'target_type': {'key': 'targetType', 'type': 'str'}, + 'provisioning_state': {'key': 'provisioningState', 'type': 'str'}, + 'nfs3': {'key': 'nfs3', 'type': 'Nfs3Target'}, + 'clfs': {'key': 'clfs', 'type': 'ClfsTarget'}, + 'unknown': {'key': 'unknown', 'type': 'UnknownTarget'}, + } + + def __init__( + self, + *, + junctions: Optional[List["NamespaceJunction"]] = None, + target_type: Optional[str] = None, + provisioning_state: Optional[Union[str, "ProvisioningStateType"]] = None, + nfs3: Optional["Nfs3Target"] = None, + clfs: Optional["ClfsTarget"] = None, + unknown: Optional["UnknownTarget"] = None, + **kwargs + ): + super(ClfsTargetProperties, self).__init__(junctions=junctions, target_type=target_type, provisioning_state=provisioning_state, nfs3=nfs3, clfs=clfs, unknown=unknown, **kwargs) + self.target_base_type: str = 'clfs' + + +class CloudErrorBody(msrest.serialization.Model): + """An error response. + + :param code: An identifier for the error. Codes are invariant and are intended to be consumed + programmatically. + :type code: str + :param details: A list of additional details about the error. + :type details: list[~storage_cache_management_client.models.CloudErrorBody] + :param message: A message describing the error, intended to be suitable for display in a user + interface. + :type message: str + :param target: The target of the particular error. For example, the name of the property in + error. + :type target: str + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'details': {'key': 'details', 'type': '[CloudErrorBody]'}, + 'message': {'key': 'message', 'type': 'str'}, + 'target': {'key': 'target', 'type': 'str'}, + } + + def __init__( + self, + *, + code: Optional[str] = None, + details: Optional[List["CloudErrorBody"]] = None, + message: Optional[str] = None, + target: Optional[str] = None, + **kwargs + ): + super(CloudErrorBody, self).__init__(**kwargs) + self.code = code + self.details = details + self.message = message + self.target = target + + +class ErrorResponse(msrest.serialization.Model): + """Describes the format of Error response. + + :param code: Error code. + :type code: str + :param message: Error message indicating why the operation failed. + :type message: str + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + } + + def __init__( + self, + *, + code: Optional[str] = None, + message: Optional[str] = None, + **kwargs + ): + super(ErrorResponse, self).__init__(**kwargs) + self.code = code + self.message = message + + +class KeyVaultKeyReference(msrest.serialization.Model): + """Describes a reference to Key Vault Key. + + All required parameters must be populated in order to send to Azure. + + :param key_url: Required. The URL referencing a key encryption key in Key Vault. + :type key_url: str + :param source_vault: Required. Describes a resource Id to source Key Vault. 
+ :type source_vault: ~storage_cache_management_client.models.KeyVaultKeyReferenceSourceVault + """ + + _validation = { + 'key_url': {'required': True}, + 'source_vault': {'required': True}, + } + + _attribute_map = { + 'key_url': {'key': 'keyUrl', 'type': 'str'}, + 'source_vault': {'key': 'sourceVault', 'type': 'KeyVaultKeyReferenceSourceVault'}, + } + + def __init__( + self, + *, + key_url: str, + source_vault: "KeyVaultKeyReferenceSourceVault", + **kwargs + ): + super(KeyVaultKeyReference, self).__init__(**kwargs) + self.key_url = key_url + self.source_vault = source_vault + + +class KeyVaultKeyReferenceSourceVault(msrest.serialization.Model): + """Describes a resource Id to source Key Vault. + + :param id: Resource Id. + :type id: str + """ + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + } + + def __init__( + self, + *, + id: Optional[str] = None, + **kwargs + ): + super(KeyVaultKeyReferenceSourceVault, self).__init__(**kwargs) + self.id = id + + +class NamespaceJunction(msrest.serialization.Model): + """A namespace junction. + + :param namespace_path: Namespace path on a Cache for a Storage Target. + :type namespace_path: str + :param target_path: Path in Storage Target to which namespacePath points. + :type target_path: str + :param nfs_export: NFS export where targetPath exists. + :type nfs_export: str + """ + + _attribute_map = { + 'namespace_path': {'key': 'namespacePath', 'type': 'str'}, + 'target_path': {'key': 'targetPath', 'type': 'str'}, + 'nfs_export': {'key': 'nfsExport', 'type': 'str'}, + } + + def __init__( + self, + *, + namespace_path: Optional[str] = None, + target_path: Optional[str] = None, + nfs_export: Optional[str] = None, + **kwargs + ): + super(NamespaceJunction, self).__init__(**kwargs) + self.namespace_path = namespace_path + self.target_path = target_path + self.nfs_export = nfs_export + + +class Nfs3Target(msrest.serialization.Model): + """Properties pertained to Nfs3Target. + + :param target: IP address or host name of an NFSv3 host (e.g., 10.0.44.44). + :type target: str + :param usage_model: Identifies the primary usage model to be used for this Storage Target. Get + choices from .../usageModels. + :type usage_model: str + """ + + _validation = { + 'target': {'pattern': r'^[-.0-9a-zA-Z]+$'}, + } + + _attribute_map = { + 'target': {'key': 'target', 'type': 'str'}, + 'usage_model': {'key': 'usageModel', 'type': 'str'}, + } + + def __init__( + self, + *, + target: Optional[str] = None, + usage_model: Optional[str] = None, + **kwargs + ): + super(Nfs3Target, self).__init__(**kwargs) + self.target = target + self.usage_model = usage_model + + +class Nfs3TargetProperties(StorageTargetProperties): + """An NFSv3 mount point for use as a Storage Target. + + All required parameters must be populated in order to send to Azure. + + :param target_base_type: Required. Type of the Storage Target.Constant filled by server. + Possible values include: "nfs3", "clfs", "unknown". + :type target_base_type: str or ~storage_cache_management_client.models.StorageTargetType + :param junctions: List of Cache namespace junctions to target for namespace associations. + :type junctions: list[~storage_cache_management_client.models.NamespaceJunction] + :param target_type: Type of the Storage Target. + :type target_type: str + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. 
Possible values include: + "Succeeded", "Failed", "Cancelled", "Creating", "Deleting", "Updating". + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param nfs3: Properties when targetType is nfs3. + :type nfs3: ~storage_cache_management_client.models.Nfs3Target + :param clfs: Properties when targetType is clfs. + :type clfs: ~storage_cache_management_client.models.ClfsTarget + :param unknown: Properties when targetType is unknown. + :type unknown: ~storage_cache_management_client.models.UnknownTarget + """ + + _validation = { + 'target_base_type': {'required': True}, + } + + _attribute_map = { + 'target_base_type': {'key': 'targetBaseType', 'type': 'str'}, + 'junctions': {'key': 'junctions', 'type': '[NamespaceJunction]'}, + 'target_type': {'key': 'targetType', 'type': 'str'}, + 'provisioning_state': {'key': 'provisioningState', 'type': 'str'}, + 'nfs3': {'key': 'nfs3', 'type': 'Nfs3Target'}, + 'clfs': {'key': 'clfs', 'type': 'ClfsTarget'}, + 'unknown': {'key': 'unknown', 'type': 'UnknownTarget'}, + } + + def __init__( + self, + *, + junctions: Optional[List["NamespaceJunction"]] = None, + target_type: Optional[str] = None, + provisioning_state: Optional[Union[str, "ProvisioningStateType"]] = None, + nfs3: Optional["Nfs3Target"] = None, + clfs: Optional["ClfsTarget"] = None, + unknown: Optional["UnknownTarget"] = None, + **kwargs + ): + super(Nfs3TargetProperties, self).__init__(junctions=junctions, target_type=target_type, provisioning_state=provisioning_state, nfs3=nfs3, clfs=clfs, unknown=unknown, **kwargs) + self.target_base_type: str = 'nfs3' + + +class ResourceSku(msrest.serialization.Model): + """A resource SKU. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar resource_type: The type of resource the SKU applies to. + :vartype resource_type: str + :param capabilities: A list of capabilities of this SKU, such as throughput or ops/sec. + :type capabilities: list[~storage_cache_management_client.models.ResourceSkuCapabilities] + :ivar locations: The set of locations that the SKU is available. This will be supported and + registered Azure Geo Regions (e.g., West US, East US, Southeast Asia, etc.). + :vartype locations: list[str] + :param location_info: The set of locations that the SKU is available. + :type location_info: list[~storage_cache_management_client.models.ResourceSkuLocationInfo] + :param name: The name of this SKU. + :type name: str + :param restrictions: The restrictions preventing this SKU from being used. This is empty if + there are no restrictions. 
+ :type restrictions: list[~storage_cache_management_client.models.Restriction] + """ + + _validation = { + 'resource_type': {'readonly': True}, + 'locations': {'readonly': True}, + } + + _attribute_map = { + 'resource_type': {'key': 'resourceType', 'type': 'str'}, + 'capabilities': {'key': 'capabilities', 'type': '[ResourceSkuCapabilities]'}, + 'locations': {'key': 'locations', 'type': '[str]'}, + 'location_info': {'key': 'locationInfo', 'type': '[ResourceSkuLocationInfo]'}, + 'name': {'key': 'name', 'type': 'str'}, + 'restrictions': {'key': 'restrictions', 'type': '[Restriction]'}, + } + + def __init__( + self, + *, + capabilities: Optional[List["ResourceSkuCapabilities"]] = None, + location_info: Optional[List["ResourceSkuLocationInfo"]] = None, + name: Optional[str] = None, + restrictions: Optional[List["Restriction"]] = None, + **kwargs + ): + super(ResourceSku, self).__init__(**kwargs) + self.resource_type = None + self.capabilities = capabilities + self.locations = None + self.location_info = location_info + self.name = name + self.restrictions = restrictions + + +class ResourceSkuCapabilities(msrest.serialization.Model): + """A resource SKU capability. + + :param name: Name of a capability, such as ops/sec. + :type name: str + :param value: Quantity, if the capability is measured by quantity. + :type value: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__( + self, + *, + name: Optional[str] = None, + value: Optional[str] = None, + **kwargs + ): + super(ResourceSkuCapabilities, self).__init__(**kwargs) + self.name = name + self.value = value + + +class ResourceSkuLocationInfo(msrest.serialization.Model): + """Resource SKU location information. + + :param location: Location where this SKU is available. + :type location: str + :param zones: Zones if any. + :type zones: list[str] + """ + + _attribute_map = { + 'location': {'key': 'location', 'type': 'str'}, + 'zones': {'key': 'zones', 'type': '[str]'}, + } + + def __init__( + self, + *, + location: Optional[str] = None, + zones: Optional[List[str]] = None, + **kwargs + ): + super(ResourceSkuLocationInfo, self).__init__(**kwargs) + self.location = location + self.zones = zones + + +class ResourceSkusResult(msrest.serialization.Model): + """The response from the List Cache SKUs operation. + + Variables are only populated by the server, and will be ignored when sending a request. + + :param next_link: The URI to fetch the next page of Cache SKUs. + :type next_link: str + :ivar value: The list of SKUs available for the subscription. + :vartype value: list[~storage_cache_management_client.models.ResourceSku] + """ + + _validation = { + 'value': {'readonly': True}, + } + + _attribute_map = { + 'next_link': {'key': 'nextLink', 'type': 'str'}, + 'value': {'key': 'value', 'type': '[ResourceSku]'}, + } + + def __init__( + self, + *, + next_link: Optional[str] = None, + **kwargs + ): + super(ResourceSkusResult, self).__init__(**kwargs) + self.next_link = next_link + self.value = None + + +class Restriction(msrest.serialization.Model): + """The restrictions preventing this SKU from being used. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar type: The type of restrictions. In this version, the only possible value for this is + location. + :vartype type: str + :ivar values: The value of restrictions. 
If the restriction type is set to location, then this + would be the different locations where the SKU is restricted. + :vartype values: list[str] + :param reason_code: The reason for the restriction. As of now this can be "QuotaId" or + "NotAvailableForSubscription". "QuotaId" is set when the SKU has requiredQuotas parameter as + the subscription does not belong to that quota. "NotAvailableForSubscription" is related to + capacity at the datacenter. Possible values include: "QuotaId", "NotAvailableForSubscription". + :type reason_code: str or ~storage_cache_management_client.models.ReasonCode + """ + + _validation = { + 'type': {'readonly': True}, + 'values': {'readonly': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'str'}, + 'values': {'key': 'values', 'type': '[str]'}, + 'reason_code': {'key': 'reasonCode', 'type': 'str'}, + } + + def __init__( + self, + *, + reason_code: Optional[Union[str, "ReasonCode"]] = None, + **kwargs + ): + super(Restriction, self).__init__(**kwargs) + self.type = None + self.values = None + self.reason_code = reason_code + + +class StorageTargetResource(msrest.serialization.Model): + """Resource used by a Cache. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar name: Name of the Storage Target. + :vartype name: str + :ivar id: Resource ID of the Storage Target. + :vartype id: str + :ivar type: Type of the Storage Target; Microsoft.StorageCache/Cache/StorageTarget. + :vartype type: str + """ + + _validation = { + 'name': {'readonly': True, 'pattern': r'^[-0-9a-zA-Z_]{1,80}$'}, + 'id': {'readonly': True}, + 'type': {'readonly': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'id': {'key': 'id', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(StorageTargetResource, self).__init__(**kwargs) + self.name = None + self.id = None + self.type = None + + +class StorageTarget(StorageTargetResource): + """Type of the Storage Target. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar name: Name of the Storage Target. + :vartype name: str + :ivar id: Resource ID of the Storage Target. + :vartype id: str + :ivar type: Type of the Storage Target; Microsoft.StorageCache/Cache/StorageTarget. + :vartype type: str + :param target_base_type: Type of the Storage Target.Constant filled by server. Possible values + include: "nfs3", "clfs", "unknown". + :type target_base_type: str or ~storage_cache_management_client.models.StorageTargetType + :param junctions: List of Cache namespace junctions to target for namespace associations. + :type junctions: list[~storage_cache_management_client.models.NamespaceJunction] + :param target_type: Type of the Storage Target. + :type target_type: str + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. Possible values include: + "Succeeded", "Failed", "Cancelled", "Creating", "Deleting", "Updating". + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param nfs3: Properties when targetType is nfs3. + :type nfs3: ~storage_cache_management_client.models.Nfs3Target + :param clfs: Properties when targetType is clfs. + :type clfs: ~storage_cache_management_client.models.ClfsTarget + :param unknown: Properties when targetType is unknown. 
+ :type unknown: ~storage_cache_management_client.models.UnknownTarget + """ + + _validation = { + 'name': {'readonly': True, 'pattern': r'^[-0-9a-zA-Z_]{1,80}$'}, + 'id': {'readonly': True}, + 'type': {'readonly': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'id': {'key': 'id', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'target_base_type': {'key': 'properties.targetBaseType', 'type': 'str'}, + 'junctions': {'key': 'properties.junctions', 'type': '[NamespaceJunction]'}, + 'target_type': {'key': 'properties.targetType', 'type': 'str'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, + 'nfs3': {'key': 'properties.nfs3', 'type': 'Nfs3Target'}, + 'clfs': {'key': 'properties.clfs', 'type': 'ClfsTarget'}, + 'unknown': {'key': 'properties.unknown', 'type': 'UnknownTarget'}, + } + + def __init__( + self, + *, + junctions: Optional[List["NamespaceJunction"]] = None, + target_type: Optional[str] = None, + provisioning_state: Optional[Union[str, "ProvisioningStateType"]] = None, + nfs3: Optional["Nfs3Target"] = None, + clfs: Optional["ClfsTarget"] = None, + unknown: Optional["UnknownTarget"] = None, + **kwargs + ): + super(StorageTarget, self).__init__(**kwargs) + self.target_base_type: Optional[str] = None + self.junctions = junctions + self.target_type = target_type + self.provisioning_state = provisioning_state + self.nfs3 = nfs3 + self.clfs = clfs + self.unknown = unknown + + +class StorageTargetsResult(msrest.serialization.Model): + """A list of Storage Targets. + + :param next_link: The URI to fetch the next page of Storage Targets. + :type next_link: str + :param value: The list of Storage Targets defined for the Cache. + :type value: list[~storage_cache_management_client.models.StorageTarget] + """ + + _attribute_map = { + 'next_link': {'key': 'nextLink', 'type': 'str'}, + 'value': {'key': 'value', 'type': '[StorageTarget]'}, + } + + def __init__( + self, + *, + next_link: Optional[str] = None, + value: Optional[List["StorageTarget"]] = None, + **kwargs + ): + super(StorageTargetsResult, self).__init__(**kwargs) + self.next_link = next_link + self.value = value + + +class UnknownTarget(msrest.serialization.Model): + """Properties pertained to UnknownTarget. + + :param unknown_map: Dictionary of string->string pairs containing information about the Storage + Target. + :type unknown_map: dict[str, str] + """ + + _attribute_map = { + 'unknown_map': {'key': 'unknownMap', 'type': '{str}'}, + } + + def __init__( + self, + *, + unknown_map: Optional[Dict[str, str]] = None, + **kwargs + ): + super(UnknownTarget, self).__init__(**kwargs) + self.unknown_map = unknown_map + + +class UnknownTargetProperties(StorageTargetProperties): + """Storage container for use as an Unknown Storage Target. + + All required parameters must be populated in order to send to Azure. + + :param target_base_type: Required. Type of the Storage Target.Constant filled by server. + Possible values include: "nfs3", "clfs", "unknown". + :type target_base_type: str or ~storage_cache_management_client.models.StorageTargetType + :param junctions: List of Cache namespace junctions to target for namespace associations. + :type junctions: list[~storage_cache_management_client.models.NamespaceJunction] + :param target_type: Type of the Storage Target. + :type target_type: str + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. 
Possible values include: + "Succeeded", "Failed", "Cancelled", "Creating", "Deleting", "Updating". + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param nfs3: Properties when targetType is nfs3. + :type nfs3: ~storage_cache_management_client.models.Nfs3Target + :param clfs: Properties when targetType is clfs. + :type clfs: ~storage_cache_management_client.models.ClfsTarget + :param unknown: Properties when targetType is unknown. + :type unknown: ~storage_cache_management_client.models.UnknownTarget + """ + + _validation = { + 'target_base_type': {'required': True}, + } + + _attribute_map = { + 'target_base_type': {'key': 'targetBaseType', 'type': 'str'}, + 'junctions': {'key': 'junctions', 'type': '[NamespaceJunction]'}, + 'target_type': {'key': 'targetType', 'type': 'str'}, + 'provisioning_state': {'key': 'provisioningState', 'type': 'str'}, + 'nfs3': {'key': 'nfs3', 'type': 'Nfs3Target'}, + 'clfs': {'key': 'clfs', 'type': 'ClfsTarget'}, + 'unknown': {'key': 'unknown', 'type': 'UnknownTarget'}, + } + + def __init__( + self, + *, + junctions: Optional[List["NamespaceJunction"]] = None, + target_type: Optional[str] = None, + provisioning_state: Optional[Union[str, "ProvisioningStateType"]] = None, + nfs3: Optional["Nfs3Target"] = None, + clfs: Optional["ClfsTarget"] = None, + unknown: Optional["UnknownTarget"] = None, + **kwargs + ): + super(UnknownTargetProperties, self).__init__(junctions=junctions, target_type=target_type, provisioning_state=provisioning_state, nfs3=nfs3, clfs=clfs, unknown=unknown, **kwargs) + self.target_base_type: str = 'unknown' + + +class UsageModel(msrest.serialization.Model): + """A usage model. + + :param display: Localized information describing this usage model. + :type display: ~storage_cache_management_client.models.UsageModelDisplay + :param model_name: Non-localized keyword name for this usage model. + :type model_name: str + :param target_type: The type of Storage Target to which this model is applicable (only nfs3 as + of this version). + :type target_type: str + """ + + _attribute_map = { + 'display': {'key': 'display', 'type': 'UsageModelDisplay'}, + 'model_name': {'key': 'modelName', 'type': 'str'}, + 'target_type': {'key': 'targetType', 'type': 'str'}, + } + + def __init__( + self, + *, + display: Optional["UsageModelDisplay"] = None, + model_name: Optional[str] = None, + target_type: Optional[str] = None, + **kwargs + ): + super(UsageModel, self).__init__(**kwargs) + self.display = display + self.model_name = model_name + self.target_type = target_type + + +class UsageModelDisplay(msrest.serialization.Model): + """Localized information describing this usage model. + + :param description: String to display for this usage model. + :type description: str + """ + + _attribute_map = { + 'description': {'key': 'description', 'type': 'str'}, + } + + def __init__( + self, + *, + description: Optional[str] = None, + **kwargs + ): + super(UsageModelDisplay, self).__init__(**kwargs) + self.description = description + + +class UsageModelsResult(msrest.serialization.Model): + """A list of Cache usage models. + + :param next_link: The URI to fetch the next page of Cache usage models. + :type next_link: str + :param value: The list of usage models available for the subscription. 
+ :type value: list[~storage_cache_management_client.models.UsageModel] + """ + + _attribute_map = { + 'next_link': {'key': 'nextLink', 'type': 'str'}, + 'value': {'key': 'value', 'type': '[UsageModel]'}, + } + + def __init__( + self, + *, + next_link: Optional[str] = None, + value: Optional[List["UsageModel"]] = None, + **kwargs + ): + super(UsageModelsResult, self).__init__(**kwargs) + self.next_link = next_link + self.value = value diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/_storage_cache_management_client_enums.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/_storage_cache_management_client_enums.py new file mode 100644 index 00000000000..0df80c1c973 --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/_storage_cache_management_client_enums.py @@ -0,0 +1,69 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from enum import Enum + +class CacheIdentityType(str, Enum): + """The type of identity used for the cache + """ + + system_assigned = "SystemAssigned" + none = "None" + +class FirmwareStatusType(str, Enum): + """True if there is a firmware update ready to install on this Cache. The firmware will + automatically be installed after firmwareUpdateDeadline if not triggered earlier via the + upgrade operation. + """ + + available = "available" + unavailable = "unavailable" + +class HealthStateType(str, Enum): + """List of Cache health states. + """ + + unknown = "Unknown" + healthy = "Healthy" + degraded = "Degraded" + down = "Down" + transitioning = "Transitioning" + stopping = "Stopping" + stopped = "Stopped" + upgrading = "Upgrading" + flushing = "Flushing" + +class ProvisioningStateType(str, Enum): + """ARM provisioning state, see https://github.com/Azure/azure-resource-manager- + rpc/blob/master/v1.0/Addendum.md#provisioningstate-property + """ + + succeeded = "Succeeded" + failed = "Failed" + cancelled = "Cancelled" + creating = "Creating" + deleting = "Deleting" + updating = "Updating" + +class ReasonCode(str, Enum): + """The reason for the restriction. As of now this can be "QuotaId" or + "NotAvailableForSubscription". "QuotaId" is set when the SKU has requiredQuotas parameter as + the subscription does not belong to that quota. "NotAvailableForSubscription" is related to + capacity at the datacenter. + """ + + quota_id = "QuotaId" + not_available_for_subscription = "NotAvailableForSubscription" + +class StorageTargetType(str, Enum): + """Type of the Storage Target. + """ + + nfs3 = "nfs3" + clfs = "clfs" + unknown = "unknown" diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/__init__.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/__init__.py new file mode 100644 index 00000000000..f3ad75917da --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/__init__.py @@ -0,0 +1,23 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._operation_operations import OperationOperations +from ._sku_operations import SkuOperations +from ._usage_model_operations import UsageModelOperations +from ._asc_operation_operations import AscOperationOperations +from ._cache_operations import CacheOperations +from ._storage_target_operations import StorageTargetOperations + +__all__ = [ + 'OperationOperations', + 'SkuOperations', + 'UsageModelOperations', + 'AscOperationOperations', + 'CacheOperations', + 'StorageTargetOperations', +] diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_asc_operation_operations.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_asc_operation_operations.py new file mode 100644 index 00000000000..5519eb3cdf9 --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_asc_operation_operations.py @@ -0,0 +1,102 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import TYPE_CHECKING +import warnings + +from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse +from azure.mgmt.core.exceptions import ARMErrorFormat + +from .. import models + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Optional, TypeVar + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +class AscOperationOperations(object): + """AscOperationOperations operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~storage_cache_management_client.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def get( + self, + location, # type: str + operation_id, # type: str + **kwargs # type: Any + ): + # type: (...) -> "models.AscOperation" + """Gets the status of an asynchronous operation for the Azure HPC cache. + + :param location: The region name which the operation will lookup into. + :type location: str + :param operation_id: The operation id which uniquely identifies the asynchronous operation. 
+ :type operation_id: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: AscOperation, or the result of cls(response) + :rtype: ~storage_cache_management_client.models.AscOperation + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.AscOperation"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + + # Construct URL + url = self.get.metadata['url'] # type: ignore + path_format_arguments = { + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'location': self._serialize.url("location", location, 'str'), + 'operationId': self._serialize.url("operation_id", operation_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize('AscOperation', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.StorageCache/locations/{location}/ascOperations/{operationId}'} # type: ignore diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_cache_operations.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_cache_operations.py new file mode 100644 index 00000000000..6635385e581 --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_cache_operations.py @@ -0,0 +1,1041 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import TYPE_CHECKING +import warnings + +from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.paging import ItemPaged +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse +from azure.core.polling import LROPoller, NoPolling, PollingMethod +from azure.mgmt.core.exceptions import ARMErrorFormat +from azure.mgmt.core.polling.arm_polling import ARMPolling + +from .. 
import models + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +class CacheOperations(object): + """CacheOperations operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~storage_cache_management_client.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def list( + self, + **kwargs # type: Any + ): + # type: (...) -> Iterable["models.CachesListResult"] + """Returns all Caches the user has access to under a subscription. + + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either CachesListResult or the result of cls(response) + :rtype: ~azure.core.paging.ItemPaged[~storage_cache_management_client.models.CachesListResult] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.CachesListResult"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + + def prepare_request(next_link=None): + if not next_link: + # Construct URL + url = self.list.metadata['url'] # type: ignore + path_format_arguments = { + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + else: + url = next_link + query_parameters = {} # type: Dict[str, Any] + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + return request + + def extract_data(pipeline_response): + deserialized = self._deserialize('CachesListResult', pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + request = prepare_request(next_link) + + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged( + get_next, extract_data + ) + list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.StorageCache/caches'} # type: ignore + + def list_by_resource_group( + self, + resource_group_name, # type: str + **kwargs # 
type: Any + ): + # type: (...) -> Iterable["models.CachesListResult"] + """Returns all Caches the user has access to under a resource group. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either CachesListResult or the result of cls(response) + :rtype: ~azure.core.paging.ItemPaged[~storage_cache_management_client.models.CachesListResult] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.CachesListResult"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + + def prepare_request(next_link=None): + if not next_link: + # Construct URL + url = self.list_by_resource_group.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + else: + url = next_link + query_parameters = {} # type: Dict[str, Any] + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + return request + + def extract_data(pipeline_response): + deserialized = self._deserialize('CachesListResult', pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + request = prepare_request(next_link) + + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged( + get_next, extract_data + ) + list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches'} # type: ignore + + def _delete_initial( + self, + resource_group_name, # type: str + cache_name, # type: str + **kwargs # type: Any + ): + # type: (...) 
-> object + cls = kwargs.pop('cls', None) # type: ClsType[object] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + + # Construct URL + url = self._delete_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 204: + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'} # type: ignore + + def begin_delete( + self, + resource_group_name, # type: str + cache_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> LROPoller + """Schedules a Cache for deletion. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
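A minimal usage sketch for begin_delete as documented above (illustrative only; it assumes an already constructed instance of the vendored StorageCacheManagementClient named `client` that exposes this CacheOperations group as `client.cache` — neither the client construction nor that attribute name is confirmed by this diff):

    # Hypothetical call; `client` and the `cache` attribute name are assumptions.
    poller = client.cache.begin_delete(
        resource_group_name="example-rg",    # target resource group
        cache_name="examplecache",           # must match ^[-0-9a-zA-Z_]{1,80}$
    )
    poller.result()                          # block until the long-running delete completes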
+ :return: An instance of LROPoller that returns either object or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[object] + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType[object] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + raw_result = self._delete_initial( + resource_group_name=resource_group_name, + cache_name=cache_name, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: polling_method = ARMPolling(lro_delay, **kwargs) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'} # type: ignore + + def get( + self, + resource_group_name, # type: str + cache_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> "models.Cache" + """Returns a Cache. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Cache, or the result of cls(response) + :rtype: ~storage_cache_management_client.models.Cache + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.Cache"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + + # Construct URL + url = self.get.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize('Cache', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get.metadata = {'url': 
'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'} # type: ignore + + def _create_or_update_initial( + self, + resource_group_name, # type: str + cache_name, # type: str + tags=None, # type: Optional[object] + location=None, # type: Optional[str] + name=None, # type: Optional[str] + cache_size_gb=None, # type: Optional[int] + provisioning_state=None, # type: Optional[Union[str, "models.ProvisioningStateType"]] + subnet=None, # type: Optional[str] + upgrade_status=None, # type: Optional["models.CacheUpgradeStatus"] + network_settings=None, # type: Optional["models.CacheNetworkSettings"] + encryption_settings=None, # type: Optional["models.CacheEncryptionSettings"] + security_settings=None, # type: Optional["models.CacheSecuritySettings"] + type=None, # type: Optional[Union[str, "models.CacheIdentityType"]] + **kwargs # type: Any + ): + # type: (...) -> "models.Cache" + cls = kwargs.pop('cls', None) # type: ClsType["models.Cache"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + + _cache = models.Cache(tags=tags, location=location, name_sku_name=name, cache_size_gb=cache_size_gb, provisioning_state=provisioning_state, subnet=subnet, upgrade_status=upgrade_status, network_settings=network_settings, encryption_settings=encryption_settings, security_settings=security_settings, type_identity_type=type) + api_version = "2020-03-01" + content_type = kwargs.pop("content_type", "application/json") + + # Construct URL + url = self._create_or_update_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = 'application/json' + + # Construct and send request + body_content_kwargs = {} # type: Dict[str, Any] + if _cache is not None: + body_content = self._serialize.body(_cache, 'Cache') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('Cache', pipeline_response) + + if response.status_code == 201: + deserialized = self._deserialize('Cache', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + _create_or_update_initial.metadata = {'url': 
'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'} # type: ignore + + def begin_create_or_update( + self, + resource_group_name, # type: str + cache_name, # type: str + tags=None, # type: Optional[object] + location=None, # type: Optional[str] + name=None, # type: Optional[str] + cache_size_gb=None, # type: Optional[int] + provisioning_state=None, # type: Optional[Union[str, "models.ProvisioningStateType"]] + subnet=None, # type: Optional[str] + upgrade_status=None, # type: Optional["models.CacheUpgradeStatus"] + network_settings=None, # type: Optional["models.CacheNetworkSettings"] + encryption_settings=None, # type: Optional["models.CacheEncryptionSettings"] + security_settings=None, # type: Optional["models.CacheSecuritySettings"] + type=None, # type: Optional[Union[str, "models.CacheIdentityType"]] + **kwargs # type: Any + ): + # type: (...) -> LROPoller + """Create or update a Cache. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :param tags: ARM tags as name/value pairs. + :type tags: object + :param location: Region name string. + :type location: str + :param name: SKU name for this Cache. + :type name: str + :param cache_size_gb: The size of this Cache, in GB. + :type cache_size_gb: int + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param subnet: Subnet used for the Cache. + :type subnet: str + :param upgrade_status: Upgrade status of the Cache. + :type upgrade_status: ~storage_cache_management_client.models.CacheUpgradeStatus + :param network_settings: Specifies network settings of the cache. + :type network_settings: ~storage_cache_management_client.models.CacheNetworkSettings + :param encryption_settings: Specifies encryption settings of the cache. + :type encryption_settings: ~storage_cache_management_client.models.CacheEncryptionSettings + :param security_settings: Specifies security settings of the cache. + :type security_settings: ~storage_cache_management_client.models.CacheSecuritySettings + :param type: The type of identity used for the cache. + :type type: str or ~storage_cache_management_client.models.CacheIdentityType + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
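A minimal usage sketch for begin_create_or_update, mirroring the parameters documented above (illustrative only; `client`, the `cache` attribute name, and every argument value are assumptions, not taken from this diff):

    # Hypothetical call; the SKU name, cache size, and subnet ID are placeholders.
    poller = client.cache.begin_create_or_update(
        resource_group_name="example-rg",
        cache_name="examplecache",
        location="eastus",
        name="Standard_2G",          # SKU name for this Cache (placeholder value)
        cache_size_gb=3072,          # size of this Cache, in GB
        subnet="<subnet resource ID>",
    )
    cache = poller.result()          # deserialized Cache model once the LRO completes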
+ :return: An instance of LROPoller that returns either Cache or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[~storage_cache_management_client.models.Cache] + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType["models.Cache"] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + raw_result = self._create_or_update_initial( + resource_group_name=resource_group_name, + cache_name=cache_name, + tags=tags, + location=location, + name=name, + cache_size_gb=cache_size_gb, + provisioning_state=provisioning_state, + subnet=subnet, + upgrade_status=upgrade_status, + network_settings=network_settings, + encryption_settings=encryption_settings, + security_settings=security_settings, + type=type, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('Cache', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: polling_method = ARMPolling(lro_delay, **kwargs) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'} # type: ignore + + def update( + self, + resource_group_name, # type: str + cache_name, # type: str + tags=None, # type: Optional[object] + location=None, # type: Optional[str] + name=None, # type: Optional[str] + cache_size_gb=None, # type: Optional[int] + provisioning_state=None, # type: Optional[Union[str, "models.ProvisioningStateType"]] + subnet=None, # type: Optional[str] + upgrade_status=None, # type: Optional["models.CacheUpgradeStatus"] + network_settings=None, # type: Optional["models.CacheNetworkSettings"] + encryption_settings=None, # type: Optional["models.CacheEncryptionSettings"] + security_settings=None, # type: Optional["models.CacheSecuritySettings"] + type=None, # type: Optional[Union[str, "models.CacheIdentityType"]] + **kwargs # type: Any + ): + # type: (...) -> "models.Cache" + """Update a Cache instance. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :param tags: ARM tags as name/value pairs. + :type tags: object + :param location: Region name string. + :type location: str + :param name: SKU name for this Cache. + :type name: str + :param cache_size_gb: The size of this Cache, in GB. + :type cache_size_gb: int + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param subnet: Subnet used for the Cache. + :type subnet: str + :param upgrade_status: Upgrade status of the Cache. + :type upgrade_status: ~storage_cache_management_client.models.CacheUpgradeStatus + :param network_settings: Specifies network settings of the cache. 
+ :type network_settings: ~storage_cache_management_client.models.CacheNetworkSettings + :param encryption_settings: Specifies encryption settings of the cache. + :type encryption_settings: ~storage_cache_management_client.models.CacheEncryptionSettings + :param security_settings: Specifies security settings of the cache. + :type security_settings: ~storage_cache_management_client.models.CacheSecuritySettings + :param type: The type of identity used for the cache. + :type type: str or ~storage_cache_management_client.models.CacheIdentityType + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Cache, or the result of cls(response) + :rtype: ~storage_cache_management_client.models.Cache + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.Cache"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + + _cache = models.Cache(tags=tags, location=location, name_sku_name=name, cache_size_gb=cache_size_gb, provisioning_state=provisioning_state, subnet=subnet, upgrade_status=upgrade_status, network_settings=network_settings, encryption_settings=encryption_settings, security_settings=security_settings, type_identity_type=type) + api_version = "2020-03-01" + content_type = kwargs.pop("content_type", "application/json") + + # Construct URL + url = self.update.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = 'application/json' + + # Construct and send request + body_content_kwargs = {} # type: Dict[str, Any] + if _cache is not None: + body_content = self._serialize.body(_cache, 'Cache') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) + + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize('Cache', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'} # type: ignore + + def _flush_initial( + self, + resource_group_name, # type: str + cache_name, # type: str + **kwargs # type: Any + ): + # type: (...) 
-> object + cls = kwargs.pop('cls', None) # type: ClsType[object] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + + # Construct URL + url = self._flush_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 204: + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + _flush_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/flush'} # type: ignore + + def begin_flush( + self, + resource_group_name, # type: str + cache_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> LROPoller + """Tells a Cache to write all dirty data to the Storage Target(s). During the flush, clients will see errors returned until the flush is complete. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
+ :return: An instance of LROPoller that returns either object or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[object] + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType[object] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + raw_result = self._flush_initial( + resource_group_name=resource_group_name, + cache_name=cache_name, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + begin_flush.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/flush'} # type: ignore + + def _start_initial( + self, + resource_group_name, # type: str + cache_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> object + cls = kwargs.pop('cls', None) # type: ClsType[object] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + + # Construct URL + url = self._start_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 204: + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + _start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/start'} # type: ignore + + def begin_start( + self, + resource_group_name, # type: str + cache_name, # type: str + 
**kwargs # type: Any + ): + # type: (...) -> LROPoller + """Tells a Stopped state Cache to transition to Active state. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. + :return: An instance of LROPoller that returns either object or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[object] + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType[object] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + raw_result = self._start_initial( + resource_group_name=resource_group_name, + cache_name=cache_name, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/start'} # type: ignore + + def _stop_initial( + self, + resource_group_name, # type: str + cache_name, # type: str + **kwargs # type: Any + ): + # type: (...) 
-> object + cls = kwargs.pop('cls', None) # type: ClsType[object] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + + # Construct URL + url = self._stop_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 204: + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + _stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/stop'} # type: ignore + + def begin_stop( + self, + resource_group_name, # type: str + cache_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> LROPoller + """Tells an Active Cache to transition to Stopped state. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
+ :return: An instance of LROPoller that returns either object or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[object] + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType[object] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + raw_result = self._stop_initial( + resource_group_name=resource_group_name, + cache_name=cache_name, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/stop'} # type: ignore + + def _upgrade_firmware_initial( + self, + resource_group_name, # type: str + cache_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> object + cls = kwargs.pop('cls', None) # type: ClsType[object] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + + # Construct URL + url = self._upgrade_firmware_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = None + if response.status_code == 201: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 204: + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + _upgrade_firmware_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/upgrade'} # type: ignore + + def begin_upgrade_firmware( + self, + resource_group_name, # 
type: str + cache_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> LROPoller + """Upgrade a Cache's firmware if a new version is available. Otherwise, this operation has no effect. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. + :return: An instance of LROPoller that returns either object or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[object] + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType[object] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + raw_result = self._upgrade_firmware_initial( + resource_group_name=resource_group_name, + cache_name=cache_name, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + begin_upgrade_firmware.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/upgrade'} # type: ignore diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_operation_operations.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_operation_operations.py new file mode 100644 index 00000000000..80c3a48458a --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_operation_operations.py @@ -0,0 +1,106 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import TYPE_CHECKING +import warnings + +from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.paging import ItemPaged +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse +from azure.mgmt.core.exceptions import ARMErrorFormat + +from .. 
import models + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +class OperationOperations(object): + """OperationOperations operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~storage_cache_management_client.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def list( + self, + **kwargs # type: Any + ): + # type: (...) -> Iterable["models.ApiOperationListResult"] + """Lists all of the available Resource Provider operations. + + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either ApiOperationListResult or the result of cls(response) + :rtype: ~azure.core.paging.ItemPaged[~storage_cache_management_client.models.ApiOperationListResult] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.ApiOperationListResult"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + + def prepare_request(next_link=None): + if not next_link: + # Construct URL + url = self.list.metadata['url'] # type: ignore + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + else: + url = next_link + query_parameters = {} # type: Dict[str, Any] + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + return request + + def extract_data(pipeline_response): + deserialized = self._deserialize('ApiOperationListResult', pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + request = prepare_request(next_link) + + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged( + get_next, extract_data + ) + list.metadata = {'url': '/providers/Microsoft.StorageCache/operations'} # type: ignore diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_sku_operations.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_sku_operations.py new file mode 100644 index 00000000000..99e28fadb9f --- /dev/null +++ 
b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_sku_operations.py @@ -0,0 +1,110 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import TYPE_CHECKING +import warnings + +from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.paging import ItemPaged +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse +from azure.mgmt.core.exceptions import ARMErrorFormat + +from .. import models + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +class SkuOperations(object): + """SkuOperations operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~storage_cache_management_client.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def list( + self, + **kwargs # type: Any + ): + # type: (...) -> Iterable["models.ResourceSkusResult"] + """Get the list of StorageCache.Cache SKUs available to this subscription. 
+ + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either ResourceSkusResult or the result of cls(response) + :rtype: ~azure.core.paging.ItemPaged[~storage_cache_management_client.models.ResourceSkusResult] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.ResourceSkusResult"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + + def prepare_request(next_link=None): + if not next_link: + # Construct URL + url = self.list.metadata['url'] # type: ignore + path_format_arguments = { + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + else: + url = next_link + query_parameters = {} # type: Dict[str, Any] + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + return request + + def extract_data(pipeline_response): + deserialized = self._deserialize('ResourceSkusResult', pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + request = prepare_request(next_link) + + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged( + get_next, extract_data + ) + list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.StorageCache/skus'} # type: ignore diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_storage_target_operations.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_storage_target_operations.py new file mode 100644 index 00000000000..1fabdf840c9 --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_storage_target_operations.py @@ -0,0 +1,453 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +from typing import TYPE_CHECKING +import warnings + +from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.paging import ItemPaged +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse +from azure.core.polling import LROPoller, NoPolling, PollingMethod +from azure.mgmt.core.exceptions import ARMErrorFormat +from azure.mgmt.core.polling.arm_polling import ARMPolling + +from .. import models + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +class StorageTargetOperations(object): + """StorageTargetOperations operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. + :type models: ~storage_cache_management_client.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def list_by_cache( + self, + resource_group_name, # type: str + cache_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> Iterable["models.StorageTargetsResult"] + """Returns a list of Storage Targets for the specified Cache. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. 
+ :type cache_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either StorageTargetsResult or the result of cls(response) + :rtype: ~azure.core.paging.ItemPaged[~storage_cache_management_client.models.StorageTargetsResult] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.StorageTargetsResult"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + + def prepare_request(next_link=None): + if not next_link: + # Construct URL + url = self.list_by_cache.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + else: + url = next_link + query_parameters = {} # type: Dict[str, Any] + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + return request + + def extract_data(pipeline_response): + deserialized = self._deserialize('StorageTargetsResult', pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + request = prepare_request(next_link) + + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged( + get_next, extract_data + ) + list_by_cache.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets'} # type: ignore + + def _delete_initial( + self, + resource_group_name, # type: str + cache_name, # type: str + storage_target_name, # type: str + **kwargs # type: Any + ): + # type: (...) 
-> object + cls = kwargs.pop('cls', None) # type: ClsType[object] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + + # Construct URL + url = self._delete_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + 'storageTargetName': self._serialize.url("storage_target_name", storage_target_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize('object', pipeline_response) + + if response.status_code == 204: + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}'} # type: ignore + + def begin_delete( + self, + resource_group_name, # type: str + cache_name, # type: str + storage_target_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> LROPoller + """Removes a Storage Target from a Cache. This operation is allowed at any time, but if the Cache is down or unhealthy, the actual removal of the Storage Target may be delayed until the Cache is healthy again. Note that if the Cache has data to flush to the Storage Target, the data will be flushed before the Storage Target will be deleted. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :param storage_target_name: Name of Storage Target. + :type storage_target_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
+ :return: An instance of LROPoller that returns either object or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[object] + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType[object] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + raw_result = self._delete_initial( + resource_group_name=resource_group_name, + cache_name=cache_name, + storage_target_name=storage_target_name, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('object', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: polling_method = ARMPolling(lro_delay, **kwargs) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}'} # type: ignore + + def get( + self, + resource_group_name, # type: str + cache_name, # type: str + storage_target_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> "models.StorageTarget" + """Returns a Storage Target from a Cache. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :param storage_target_name: Name of the Storage Target. Length of name must be not greater than + 80 and chars must be in list of [-0-9a-zA-Z_] char class. 
+ :type storage_target_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: StorageTarget, or the result of cls(response) + :rtype: ~storage_cache_management_client.models.StorageTarget + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.StorageTarget"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + + # Construct URL + url = self.get.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + 'storageTargetName': self._serialize.url("storage_target_name", storage_target_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize('StorageTarget', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}'} # type: ignore + + def _create_or_update_initial( + self, + resource_group_name, # type: str + cache_name, # type: str + storage_target_name, # type: str + target_base_type=None, # type: Optional[Union[str, "models.StorageTargetType"]] + junctions=None, # type: Optional[List["models.NamespaceJunction"]] + target_type=None, # type: Optional[str] + provisioning_state=None, # type: Optional[Union[str, "models.ProvisioningStateType"]] + nfs3=None, # type: Optional["models.Nfs3Target"] + clfs=None, # type: Optional["models.ClfsTarget"] + unknown=None, # type: Optional["models.UnknownTarget"] + **kwargs # type: Any + ): + # type: (...) 
-> "models.StorageTarget" + cls = kwargs.pop('cls', None) # type: ClsType["models.StorageTarget"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + + _storagetarget = models.StorageTarget(target_base_type=target_base_type, junctions=junctions, target_type=target_type, provisioning_state=provisioning_state, nfs3=nfs3, clfs=clfs, unknown=unknown) + api_version = "2020-03-01" + content_type = kwargs.pop("content_type", "application/json") + + # Construct URL + url = self._create_or_update_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + 'storageTargetName': self._serialize.url("storage_target_name", storage_target_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + header_parameters['Accept'] = 'application/json' + + # Construct and send request + body_content_kwargs = {} # type: Dict[str, Any] + if _storagetarget is not None: + body_content = self._serialize.body(_storagetarget, 'StorageTarget') + else: + body_content = None + body_content_kwargs['content'] = body_content + request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) + + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('StorageTarget', pipeline_response) + + if response.status_code == 201: + deserialized = self._deserialize('StorageTarget', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}'} # type: ignore + + def begin_create_or_update( + self, + resource_group_name, # type: str + cache_name, # type: str + storage_target_name, # type: str + target_base_type=None, # type: Optional[Union[str, "models.StorageTargetType"]] + junctions=None, # type: Optional[List["models.NamespaceJunction"]] + target_type=None, # type: Optional[str] + provisioning_state=None, # type: Optional[Union[str, "models.ProvisioningStateType"]] + nfs3=None, # type: Optional["models.Nfs3Target"] + clfs=None, # type: Optional["models.ClfsTarget"] + unknown=None, # type: Optional["models.UnknownTarget"] + **kwargs # type: Any + ): + # type: (...) -> LROPoller + """Create or update a Storage Target. 
This operation is allowed at any time, but if the Cache is down or unhealthy, the actual creation/modification of the Storage Target may be delayed until the Cache is healthy again. + + :param resource_group_name: Target resource group. + :type resource_group_name: str + :param cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be + in list of [-0-9a-zA-Z_] char class. + :type cache_name: str + :param storage_target_name: Name of the Storage Target. Length of name must be not greater than + 80 and chars must be in list of [-0-9a-zA-Z_] char class. + :type storage_target_name: str + :param target_base_type: Type of the Storage Target. + :type target_base_type: str or ~storage_cache_management_client.models.StorageTargetType + :param junctions: List of Cache namespace junctions to target for namespace associations. + :type junctions: list[~storage_cache_management_client.models.NamespaceJunction] + :param target_type: Type of the Storage Target. + :type target_type: str + :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource- + manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. + :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType + :param nfs3: Properties when targetType is nfs3. + :type nfs3: ~storage_cache_management_client.models.Nfs3Target + :param clfs: Properties when targetType is clfs. + :type clfs: ~storage_cache_management_client.models.ClfsTarget + :param unknown: Properties when targetType is unknown. + :type unknown: ~storage_cache_management_client.models.UnknownTarget + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
+ :return: An instance of LROPoller that returns either StorageTarget or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[~storage_cache_management_client.models.StorageTarget] + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType["models.StorageTarget"] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + raw_result = self._create_or_update_initial( + resource_group_name=resource_group_name, + cache_name=cache_name, + storage_target_name=storage_target_name, + target_base_type=target_base_type, + junctions=junctions, + target_type=target_type, + provisioning_state=provisioning_state, + nfs3=nfs3, + clfs=clfs, + unknown=unknown, + cls=lambda x,y,z: x, + **kwargs + ) + + kwargs.pop('error_map', None) + kwargs.pop('content_type', None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize('StorageTarget', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: polling_method = ARMPolling(lro_delay, **kwargs) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}'} # type: ignore diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_usage_model_operations.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_usage_model_operations.py new file mode 100644 index 00000000000..e601cf21cda --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_usage_model_operations.py @@ -0,0 +1,110 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import TYPE_CHECKING +import warnings + +from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error +from azure.core.paging import ItemPaged +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpRequest, HttpResponse +from azure.mgmt.core.exceptions import ARMErrorFormat + +from .. import models + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +class UsageModelOperations(object): + """UsageModelOperations operations. + + You should not instantiate this class directly. Instead, you should create a Client instance that + instantiates it for you and attaches it as an attribute. + + :ivar models: Alias to model classes used in this operation group. 
+ :type models: ~storage_cache_management_client.models + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self._config = config + + def list( + self, + **kwargs # type: Any + ): + # type: (...) -> Iterable["models.UsageModelsResult"] + """Get the list of Cache Usage Models available to this subscription. + + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either UsageModelsResult or the result of cls(response) + :rtype: ~azure.core.paging.ItemPaged[~storage_cache_management_client.models.UsageModelsResult] + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.UsageModelsResult"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + api_version = "2020-03-01" + + def prepare_request(next_link=None): + if not next_link: + # Construct URL + url = self.list.metadata['url'] # type: ignore + path_format_arguments = { + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + else: + url = next_link + query_parameters = {} # type: Dict[str, Any] + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + return request + + def extract_data(pipeline_response): + deserialized = self._deserialize('UsageModelsResult', pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + request = prepare_request(next_link) + + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged( + get_next, extract_data + ) + list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.StorageCache/usageModels'} # type: ignore diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/py.typed b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/py.typed new file mode 100644 index 00000000000..e5aff4f83af --- /dev/null +++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. \ No newline at end of file diff --git a/src/storagecache/report.md b/src/storagecache/report.md new file mode 100644 index 00000000000..88947713ad5 --- /dev/null +++ b/src/storagecache/report.md @@ -0,0 +1,171 @@ +# Azure CLI Module Creation Report + +### storagecache asc-operation show + +show a storagecache asc-operation. 
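The commands in this report are thin wrappers over the vendored SDK operations added earlier in this diff. For the illustrative sketches that follow, assume a client constructed roughly as below; the class name, import path, constructor arguments, and operation-group attribute names are inferred from the generated code rather than shown in this section, so treat them as assumptions, not the final API:

```python
# Hypothetical setup: import path and constructor are assumptions based on the
# vendored_sdks layout and the track-2 conventions of the generated code.
from azure.identity import DefaultAzureCredential
from azext_storagecache.vendored_sdks.storagecache import StorageCacheManagementClient

client = StorageCacheManagementClient(
    credential=DefaultAzureCredential(),
    subscription_id="00000000-0000-0000-0000-000000000000",  # placeholder
)

# asc-operation show corresponds to inspecting the state of an asynchronous
# operation; with the SDK that state is normally read from the LROPoller that
# any of the begin_* methods return, e.g. a flush:
poller = client.cache.begin_flush(          # `cache` attribute name is assumed
    resource_group_name="example-rg",
    cache_name="examplecache",              # must match ^[-0-9a-zA-Z_]{1,80}$
)
print(poller.status())                      # e.g. "InProgress" or "Succeeded"
```

The options below drive the equivalent status lookup directly from the CLI.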
+ +|Option|Type|Description|Path (SDK)|Path (swagger)| +|------|----|-----------|----------|--------------| +|**--location**|string|The region name which the operation will lookup into.|location| +|**--operation-id**|string|The operation id which uniquely identifies the asynchronous operation.|operation_id| +### storagecache cache create + +create a storagecache cache. + +|Option|Type|Description|Path (SDK)|Path (swagger)| +|------|----|-----------|----------|--------------| +|**--resource-group-name**|string|Target resource group.|resource_group_name| +|**--cache-name**|string|Name of Cache. Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.|cache_name| +|**--tags**|any|ARM tags as name/value pairs.|tags| +|**--location**|string|Region name string.|location| +|**--sku-name**|string|SKU name for this Cache.|name_sku_name| +|**--cache-size-gb**|integer|The size of this Cache, in GB.|cache_size_gb| +|**--provisioning-state**|choice|ARM provisioning state, see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property|provisioning_state| +|**--subnet**|string|Subnet used for the Cache.|subnet| +|**--network-settings**|object|Specifies network settings of the cache.|network_settings| +|**--encryption-settings**|object|Specifies encryption settings of the cache.|encryption_settings| +|**--security-settings**|object|Specifies security settings of the cache.|security_settings| +|**--identity-type**|sealed-choice|The type of identity used for the cache|type_identity_type| +### storagecache cache delete + +delete a storagecache cache. + +|Option|Type|Description|Path (SDK)|Path (swagger)| +|------|----|-----------|----------|--------------| +|**--resource-group-name**|string|Target resource group.|resource_group_name| +|**--cache-name**|string|Name of Cache. Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.|cache_name| +### storagecache cache flush + +flush a storagecache cache. + +|Option|Type|Description|Path (SDK)|Path (swagger)| +|------|----|-----------|----------|--------------| +|**--resource-group-name**|string|Target resource group.|resource_group_name| +|**--cache-name**|string|Name of Cache. Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.|cache_name| +### storagecache cache list + +list a storagecache cache. + +|Option|Type|Description|Path (SDK)|Path (swagger)| +|------|----|-----------|----------|--------------| +|**--resource-group-name**|string|Target resource group.|resource_group_name| +### storagecache cache show + +show a storagecache cache. + +|Option|Type|Description|Path (SDK)|Path (swagger)| +|------|----|-----------|----------|--------------| +|**--resource-group-name**|string|Target resource group.|resource_group_name| +|**--cache-name**|string|Name of Cache. Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.|cache_name| +### storagecache cache start + +start a storagecache cache. + +|Option|Type|Description|Path (SDK)|Path (swagger)| +|------|----|-----------|----------|--------------| +|**--resource-group-name**|string|Target resource group.|resource_group_name| +|**--cache-name**|string|Name of Cache. Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.|cache_name| +### storagecache cache stop + +stop a storagecache cache. 
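Stopping (and later restarting) a Cache is a long-running operation: the command wraps the `begin_stop` / `begin_start` pollers defined in the vendored `CacheOperations` above. A minimal sketch, reusing the hypothetical `client` from the earlier setup:

```python
# Stop the Cache and block until the ARM long-running operation completes.
client.cache.begin_stop(
    resource_group_name="example-rg",
    cache_name="examplecache",
).wait()

# Later, transition the Stopped Cache back to the Active state.
client.cache.begin_start(
    resource_group_name="example-rg",
    cache_name="examplecache",
).result()
```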
+
+|Option|Type|Description|Path (SDK)|Path (swagger)|
+|------|----|-----------|----------|--------------|
+|**--resource-group-name**|string|Target resource group.|resource_group_name|
+|**--cache-name**|string|Name of Cache. Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.|cache_name|
+### storagecache cache update
+
+update a storagecache cache.
+
+|Option|Type|Description|Path (SDK)|Path (swagger)|
+|------|----|-----------|----------|--------------|
+|**--resource-group-name**|string|Target resource group.|resource_group_name|
+|**--cache-name**|string|Name of Cache. Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.|cache_name|
+|**--tags**|any|ARM tags as name/value pairs.|tags|
+|**--location**|string|Region name string.|location|
+|**--sku-name**|string|SKU name for this Cache.|name_sku_name|
+|**--cache-size-gb**|integer|The size of this Cache, in GB.|cache_size_gb|
+|**--provisioning-state**|choice|ARM provisioning state, see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property|provisioning_state|
+|**--subnet**|string|Subnet used for the Cache.|subnet|
+|**--network-settings**|object|Specifies network settings of the cache.|network_settings|
+|**--encryption-settings**|object|Specifies encryption settings of the cache.|encryption_settings|
+|**--security-settings**|object|Specifies security settings of the cache.|security_settings|
+|**--identity-type**|sealed-choice|The type of identity used for the cache|type_identity_type|
+### storagecache cache upgrade-firmware
+
+upgrade-firmware a storagecache cache.
+
+|Option|Type|Description|Path (SDK)|Path (swagger)|
+|------|----|-----------|----------|--------------|
+|**--resource-group-name**|string|Target resource group.|resource_group_name|
+|**--cache-name**|string|Name of Cache. Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.|cache_name|
+### storagecache sku list
+
+list a storagecache sku.
+
+|Option|Type|Description|Path (SDK)|Path (swagger)|
+|------|----|-----------|----------|--------------|
+### storagecache storage-target create
+
+create a storagecache storage-target.
+
+|Option|Type|Description|Path (SDK)|Path (swagger)|
+|------|----|-----------|----------|--------------|
+|**--resource-group-name**|string|Target resource group.|resource_group_name|
+|**--cache-name**|string|Name of Cache. Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.|cache_name|
+|**--storage-target-name**|string|Name of the Storage Target. Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.|storage_target_name|
+|**--target-base-type**|choice|Type of the Storage Target.|target_base_type|
+|**--junctions**|array|List of Cache namespace junctions to target for namespace associations.|junctions|
+|**--target-type**|string|Type of the Storage Target.|target_type|
+|**--provisioning-state**|choice|ARM provisioning state, see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property|provisioning_state|
+|**--nfs3**|object|Properties when targetType is nfs3.|nfs3|
+|**--clfs**|object|Properties when targetType is clfs.|clfs|
+|**--unknown**|object|Properties when targetType is unknown.|unknown|
+### storagecache storage-target delete
+
+delete a storagecache storage-target.
+
+|Option|Type|Description|Path (SDK)|Path (swagger)|
+|------|----|-----------|----------|--------------|
+|**--resource-group-name**|string|Target resource group.|resource_group_name|
+|**--cache-name**|string|Name of Cache. Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.|cache_name|
+|**--storage-target-name**|string|Name of Storage Target.|storage_target_name|
+### storagecache storage-target list
+
+list a storagecache storage-target.
+
+|Option|Type|Description|Path (SDK)|Path (swagger)|
+|------|----|-----------|----------|--------------|
+|**--resource-group-name**|string|Target resource group.|resource_group_name|
+|**--cache-name**|string|Name of Cache. Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.|cache_name|
+### storagecache storage-target show
+
+show a storagecache storage-target.
+
+|Option|Type|Description|Path (SDK)|Path (swagger)|
+|------|----|-----------|----------|--------------|
+|**--resource-group-name**|string|Target resource group.|resource_group_name|
+|**--cache-name**|string|Name of Cache. Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.|cache_name|
+|**--storage-target-name**|string|Name of the Storage Target. Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.|storage_target_name|
+### storagecache storage-target update
+
+update a storagecache storage-target.
+
+|Option|Type|Description|Path (SDK)|Path (swagger)|
+|------|----|-----------|----------|--------------|
+|**--resource-group-name**|string|Target resource group.|resource_group_name|
+|**--cache-name**|string|Name of Cache. Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.|cache_name|
+|**--storage-target-name**|string|Name of the Storage Target. Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.|storage_target_name|
+|**--target-base-type**|choice|Type of the Storage Target.|target_base_type|
+|**--junctions**|array|List of Cache namespace junctions to target for namespace associations.|junctions|
+|**--target-type**|string|Type of the Storage Target.|target_type|
+|**--provisioning-state**|choice|ARM provisioning state, see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property|provisioning_state|
+|**--nfs3**|object|Properties when targetType is nfs3.|nfs3|
+|**--clfs**|object|Properties when targetType is clfs.|clfs|
+|**--unknown**|object|Properties when targetType is unknown.|unknown|
+### storagecache usage-model list
+
+list a storagecache usage-model.
+
+|Option|Type|Description|Path (SDK)|Path (swagger)|
+|------|----|-----------|----------|--------------|
\ No newline at end of file
diff --git a/src/storagecache/setup.cfg b/src/storagecache/setup.cfg
new file mode 100644
index 00000000000..2fdd96e5d39
--- /dev/null
+++ b/src/storagecache/setup.cfg
@@ -0,0 +1 @@
+#setup.cfg
\ No newline at end of file
diff --git a/src/storagecache/setup.py b/src/storagecache/setup.py
new file mode 100644
index 00000000000..09a49faa0e1
--- /dev/null
+++ b/src/storagecache/setup.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+
+from codecs import open
+from setuptools import setup, find_packages
+
+# HISTORY.rst entry.
+VERSION = '0.1.0'
+try:
+    from .manual.version import VERSION
+except ImportError:
+    pass
+
+# The full list of classifiers is available at
+# https://pypi.python.org/pypi?%3Aaction=list_classifiers
+CLASSIFIERS = [
+    'Development Status :: 4 - Beta',
+    'Intended Audience :: Developers',
+    'Intended Audience :: System Administrators',
+    'Programming Language :: Python',
+    'Programming Language :: Python :: 3',
+    'Programming Language :: Python :: 3.6',
+    'Programming Language :: Python :: 3.7',
+    'Programming Language :: Python :: 3.8',
+    'License :: OSI Approved :: MIT License',
+]
+
+DEPENDENCIES = []
+try:
+    from .manual.dependency import DEPENDENCIES
+except ImportError:
+    pass
+
+with open('README.md', 'r', encoding='utf-8') as f:
+    README = f.read()
+with open('HISTORY.rst', 'r', encoding='utf-8') as f:
+    HISTORY = f.read()
+
+setup(
+    name='storagecache',
+    version=VERSION,
+    description='Microsoft Azure Command-Line Tools StorageCacheManagementClient Extension',
+    author='Microsoft Corporation',
+    author_email='azpycli@microsoft.com',
+    url='https://github.com/Azure/azure-cli-extensions/tree/master/src/storagecache',
+    long_description=README + '\n\n' + HISTORY,
+    license='MIT',
+    classifiers=CLASSIFIERS,
+    packages=find_packages(),
+    install_requires=DEPENDENCIES,
+    package_data={'azext_storagecache': ['azext_metadata.json']},
+)
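
As a consumer-side illustration of the paged `list` operation shown earlier, the sketch below iterates the usage models through the vendored SDK directly rather than through the CLI. It is a minimal sketch under stated assumptions: the `StorageCacheManagementClient` class, the `usage_models` operations group, the vendored import path, and the `(credential, subscription_id)` constructor are inferred from standard track-2 management-plane conventions and are not confirmed by this diff.

```python
# Minimal sketch -- client name, import path, `usage_models` attribute, and
# constructor signature are assumptions based on track-2 conventions.
from azure.identity import DefaultAzureCredential
from azext_storagecache.vendored_sdks.storagecache import StorageCacheManagementClient

client = StorageCacheManagementClient(
    credential=DefaultAzureCredential(),
    subscription_id="00000000-0000-0000-0000-000000000000",  # placeholder
)

# ItemPaged drives the prepare_request/extract_data/get_next trio shown above:
# each page is fetched on demand, following next_link until none is returned.
for usage_model in client.usage_models.list():
    print(usage_model.as_dict())
```

Iteration stays lazy: no HTTP request is issued until the loop asks for the first item, and the generator stops once a page comes back without a `next_link`.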