diff --git a/src/aks-preview/azext_aks_preview/_params.py b/src/aks-preview/azext_aks_preview/_params.py index b8df955f7ce..3611a4dddea 100644 --- a/src/aks-preview/azext_aks_preview/_params.py +++ b/src/aks-preview/azext_aks_preview/_params.py @@ -21,12 +21,12 @@ ) from knack.arguments import CLIArgumentType -from ._completers import ( +from azext_aks_preview._completers import ( get_k8s_upgrades_completion_list, get_k8s_versions_completion_list, get_vm_size_completion_list, ) -from ._consts import ( +from azext_aks_preview._consts import ( CONST_CREDENTIAL_FORMAT_AZURE, CONST_CREDENTIAL_FORMAT_EXEC, CONST_GPU_INSTANCE_PROFILE_MIG1_G, @@ -69,7 +69,7 @@ CONST_AZURE_KEYVAULT_NETWORK_ACCESS_PUBLIC, CONST_AZURE_KEYVAULT_NETWORK_ACCESS_PRIVATE, ) -from ._validators import ( +from azext_aks_preview._validators import ( validate_acr, validate_addon, validate_addons, diff --git a/src/aks-preview/azext_aks_preview/_podidentity.py b/src/aks-preview/azext_aks_preview/_podidentity.py index d2553c90aff..7a62d7d655c 100644 --- a/src/aks-preview/azext_aks_preview/_podidentity.py +++ b/src/aks-preview/azext_aks_preview/_podidentity.py @@ -8,10 +8,12 @@ from knack.log import get_logger from knack.util import CLIError -from ._client_factory import get_auth_management_client -from ._consts import (CONST_MANAGED_IDENTITY_OPERATOR_ROLE, - CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID) -from ._roleassignments import add_role_assignment +from azext_aks_preview._client_factory import get_auth_management_client +from azext_aks_preview._consts import ( + CONST_MANAGED_IDENTITY_OPERATOR_ROLE, + CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID, +) +from azext_aks_preview._roleassignments import add_role_assignment logger = get_logger(__name__) diff --git a/src/aks-preview/azext_aks_preview/_resourcegroup.py b/src/aks-preview/azext_aks_preview/_resourcegroup.py index f964396e41d..81cf62db847 100644 --- a/src/aks-preview/azext_aks_preview/_resourcegroup.py +++ 
b/src/aks-preview/azext_aks_preview/_resourcegroup.py @@ -4,7 +4,7 @@ # -------------------------------------------------------------------------------------------- from knack.util import CLIError -from ._client_factory import cf_resource_groups +from azext_aks_preview._client_factory import cf_resource_groups def get_rg_location(ctx, resource_group_name, subscription_id=None): diff --git a/src/aks-preview/azext_aks_preview/_roleassignments.py b/src/aks-preview/azext_aks_preview/_roleassignments.py index 438cfacd0b0..ae3238dd8f3 100644 --- a/src/aks-preview/azext_aks_preview/_roleassignments.py +++ b/src/aks-preview/azext_aks_preview/_roleassignments.py @@ -9,7 +9,7 @@ from knack.log import get_logger from knack.util import CLIError from msrestazure.azure_exceptions import CloudError -from ._client_factory import get_auth_management_client, get_graph_rbac_management_client +from azext_aks_preview._client_factory import get_auth_management_client, get_graph_rbac_management_client logger = get_logger(__name__) diff --git a/src/aks-preview/azext_aks_preview/_validators.py b/src/aks-preview/azext_aks_preview/_validators.py index f32a2e1a091..2a314a69396 100644 --- a/src/aks-preview/azext_aks_preview/_validators.py +++ b/src/aks-preview/azext_aks_preview/_validators.py @@ -4,26 +4,25 @@ # -------------------------------------------------------------------------------------------- from __future__ import unicode_literals + import os import os.path import re -from math import isnan, isclose from ipaddress import ip_network +from math import isclose, isnan -from knack.log import get_logger - -from azure.cli.core.azclierror import InvalidArgumentValueError, ArgumentUsageError, RequiredArgumentMissingError +import azure.cli.core.keys as keys +from azure.cli.core.azclierror import ( + ArgumentUsageError, + InvalidArgumentValueError, + RequiredArgumentMissingError, +) from azure.cli.core.commands.validators import validate_tag from azure.cli.core.util import CLIError -import 
azure.cli.core.keys as keys - -from ._helpers import (_fuzzy_match) +from knack.log import get_logger -from ._consts import ( - ADDONS, - CONST_AZURE_KEYVAULT_NETWORK_ACCESS_PUBLIC, - CONST_AZURE_KEYVAULT_NETWORK_ACCESS_PRIVATE, -) +from azext_aks_preview._consts import ADDONS +from azext_aks_preview._helpers import _fuzzy_match logger = get_logger(__name__) diff --git a/src/aks-preview/azext_aks_preview/addonconfiguration.py b/src/aks-preview/azext_aks_preview/addonconfiguration.py index 0713a6903c9..8671e52ac73 100644 --- a/src/aks-preview/azext_aks_preview/addonconfiguration.py +++ b/src/aks-preview/azext_aks_preview/addonconfiguration.py @@ -11,16 +11,30 @@ from azure.cli.core.commands.client_factory import get_subscription_id from azure.cli.core.util import sdk_no_wait from azext_aks_preview._client_factory import CUSTOM_MGMT_AKS_PREVIEW -from ._client_factory import cf_resources, cf_resource_groups -from ._resourcegroup import get_rg_location -from ._roleassignments import add_role_assignment -from ._consts import ADDONS, CONST_VIRTUAL_NODE_ADDON_NAME, CONST_MONITORING_ADDON_NAME, \ - CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID, CONST_MONITORING_USING_AAD_MSI_AUTH, \ - CONST_VIRTUAL_NODE_SUBNET_NAME, CONST_INGRESS_APPGW_ADDON_NAME, CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME, \ - CONST_INGRESS_APPGW_SUBNET_CIDR, CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID, CONST_INGRESS_APPGW_SUBNET_ID, \ - CONST_INGRESS_APPGW_WATCH_NAMESPACE, CONST_OPEN_SERVICE_MESH_ADDON_NAME, CONST_CONFCOM_ADDON_NAME, \ - CONST_ACC_SGX_QUOTE_HELPER_ENABLED, CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME, CONST_SECRET_ROTATION_ENABLED, CONST_ROTATION_POLL_INTERVAL, \ - CONST_KUBE_DASHBOARD_ADDON_NAME +from azext_aks_preview._client_factory import cf_resources, cf_resource_groups +from azext_aks_preview._resourcegroup import get_rg_location +from azext_aks_preview._roleassignments import add_role_assignment +from azext_aks_preview._consts import ( + ADDONS, + 
CONST_VIRTUAL_NODE_ADDON_NAME, + CONST_MONITORING_ADDON_NAME, + CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID, + CONST_MONITORING_USING_AAD_MSI_AUTH, + CONST_VIRTUAL_NODE_SUBNET_NAME, + CONST_INGRESS_APPGW_ADDON_NAME, + CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME, + CONST_INGRESS_APPGW_SUBNET_CIDR, + CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID, + CONST_INGRESS_APPGW_SUBNET_ID, + CONST_INGRESS_APPGW_WATCH_NAMESPACE, + CONST_OPEN_SERVICE_MESH_ADDON_NAME, + CONST_CONFCOM_ADDON_NAME, + CONST_ACC_SGX_QUOTE_HELPER_ENABLED, + CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME, + CONST_SECRET_ROTATION_ENABLED, + CONST_ROTATION_POLL_INTERVAL, + CONST_KUBE_DASHBOARD_ADDON_NAME, +) from .vendored_sdks.azure_mgmt_preview_aks.v2022_05_02_preview.models import ( ManagedClusterIngressProfile, ManagedClusterIngressProfileWebAppRouting, diff --git a/src/aks-preview/azext_aks_preview/commands.py b/src/aks-preview/azext_aks_preview/commands.py index b9a1c8500a0..6a12673c50b 100644 --- a/src/aks-preview/azext_aks_preview/commands.py +++ b/src/aks-preview/azext_aks_preview/commands.py @@ -16,7 +16,6 @@ from ._format import aks_addon_list_available_table_format, aks_addon_list_table_format, aks_addon_show_table_format from ._format import aks_agentpool_show_table_format from ._format import aks_agentpool_list_table_format -from ._format import aks_versions_table_format from ._format import aks_upgrades_table_format from ._format import aks_pod_identities_table_format from ._format import aks_pod_identity_exceptions_table_format diff --git a/src/aks-preview/azext_aks_preview/custom.py b/src/aks-preview/azext_aks_preview/custom.py index 5e00751ee45..9fb647e83a4 100644 --- a/src/aks-preview/azext_aks_preview/custom.py +++ b/src/aks-preview/azext_aks_preview/custom.py @@ -127,9 +127,7 @@ ) from .vendored_sdks.azure_mgmt_preview_aks.v2022_05_02_preview.models import ( - AgentPool, AgentPoolUpgradeSettings, - ContainerServiceStorageProfileTypes, CreationData, KubeletConfig, 
LinuxOSConfig, @@ -2388,10 +2386,9 @@ def aks_pod_identity_add(cmd, client, resource_group_name, cluster_name, pod_identity.binding_selector = binding_selector pod_identities.append(pod_identity) - from azext_aks_preview.decorator import AKSPreviewModels - + from azext_aks_preview.managed_cluster_decorator import AKSPreviewManagedClusterModels # store all the models used by pod identity - pod_identity_models = AKSPreviewModels( + pod_identity_models = AKSPreviewManagedClusterModels( cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models _update_addon_pod_identity( instance, enable=True, @@ -2418,10 +2415,9 @@ def aks_pod_identity_delete(cmd, client, resource_group_name, cluster_name, continue pod_identities.append(pod_identity) - from azext_aks_preview.decorator import AKSPreviewModels - + from azext_aks_preview.managed_cluster_decorator import AKSPreviewManagedClusterModels # store all the models used by pod identity - pod_identity_models = AKSPreviewModels( + pod_identity_models = AKSPreviewManagedClusterModels( cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models _update_addon_pod_identity( instance, enable=True, @@ -2451,10 +2447,9 @@ def aks_pod_identity_exception_add(cmd, client, resource_group_name, cluster_nam name=exc_name, namespace=exc_namespace, pod_labels=pod_labels) pod_identity_exceptions.append(exc) - from azext_aks_preview.decorator import AKSPreviewModels - + from azext_aks_preview.managed_cluster_decorator import AKSPreviewManagedClusterModels # store all the models used by pod identity - pod_identity_models = AKSPreviewModels( + pod_identity_models = AKSPreviewManagedClusterModels( cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models _update_addon_pod_identity( instance, enable=True, @@ -2480,10 +2475,9 @@ def aks_pod_identity_exception_delete(cmd, client, resource_group_name, cluster_ continue pod_identity_exceptions.append(exc) - from azext_aks_preview.decorator import AKSPreviewModels - + from azext_aks_preview.managed_cluster_decorator import 
AKSPreviewManagedClusterModels # store all the models used by pod identity - pod_identity_models = AKSPreviewModels( + pod_identity_models = AKSPreviewManagedClusterModels( cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models _update_addon_pod_identity( instance, enable=True, @@ -2517,10 +2511,9 @@ def aks_pod_identity_exception_update(cmd, client, resource_group_name, cluster_ raise CLIError( 'pod identity exception {}/{} not found'.format(exc_namespace, exc_name)) - from azext_aks_preview.decorator import AKSPreviewModels - + from azext_aks_preview.managed_cluster_decorator import AKSPreviewManagedClusterModels # store all the models used by pod identity - pod_identity_models = AKSPreviewModels( + pod_identity_models = AKSPreviewManagedClusterModels( cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models _update_addon_pod_identity( instance, enable=True, diff --git a/src/aks-preview/azext_aks_preview/decorator.py b/src/aks-preview/azext_aks_preview/decorator.py deleted file mode 100644 index 53bddfe6607..00000000000 --- a/src/aks-preview/azext_aks_preview/decorator.py +++ /dev/null @@ -1,3273 +0,0 @@ -# -------------------------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. 
-# -------------------------------------------------------------------------------------------- - -import base64 -import os -import time -from types import SimpleNamespace -from typing import Dict, List, Tuple, TypeVar, Union, Optional - -from azure.cli.command_modules.acs._consts import ( - DecoratorEarlyExitException, - DecoratorMode, -) -from azure.cli.command_modules.acs.decorator import ( - AKSContext, - AKSCreateDecorator, - AKSModels, - AKSUpdateDecorator, - check_is_msi_cluster, - safe_list_get, - safe_lower, -) -from azure.cli.core import AzCommandsLoader -from azure.cli.core.azclierror import ( - ArgumentUsageError, - AzCLIError, - CLIInternalError, - InvalidArgumentValueError, - MutuallyExclusiveArgumentError, - RequiredArgumentMissingError, - UnknownError, -) -from azure.cli.core.commands import AzCliCommand -from azure.cli.core.profiles import ResourceType -from azure.cli.core.util import get_file_json, read_file_content -from azure.core.exceptions import HttpResponseError -from knack.log import get_logger -from knack.prompting import prompt_y_n -from msrestazure.azure_exceptions import CloudError - -from azext_aks_preview._consts import ( - CONST_OUTBOUND_TYPE_LOAD_BALANCER, - CONST_OUTBOUND_TYPE_MANAGED_NAT_GATEWAY, - CONST_OUTBOUND_TYPE_USER_ASSIGNED_NAT_GATEWAY, - CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING, - CONST_DISK_DRIVER_V1, - CONST_AZURE_KEYVAULT_NETWORK_ACCESS_PUBLIC, - CONST_AZURE_KEYVAULT_NETWORK_ACCESS_PRIVATE, -) -from azext_aks_preview._loadbalancer import create_load_balancer_profile -from azext_aks_preview._loadbalancer import ( - update_load_balancer_profile as _update_load_balancer_profile, -) -from azext_aks_preview._natgateway import ( - create_nat_gateway_profile, - is_nat_gateway_profile_provided, -) -from azext_aks_preview._natgateway import ( - update_nat_gateway_profile as _update_nat_gateway_profile, -) -from azext_aks_preview._podidentity import ( - _fill_defaults_for_pod_identity_profile, - _is_pod_identity_addon_enabled, - 
_update_addon_pod_identity, -) -from azext_aks_preview.addonconfiguration import ( - ensure_container_insights_for_monitoring, - ensure_default_log_analytics_workspace_for_monitoring, -) -from azext_aks_preview.custom import ( - _get_snapshot, - _get_cluster_snapshot, - _ensure_cluster_identity_permission_on_kubelet_identity, -) - -logger = get_logger(__name__) - -# type variables -ContainerServiceClient = TypeVar("ContainerServiceClient") -Identity = TypeVar("Identity") -ManagedCluster = TypeVar("ManagedCluster") -ManagedClusterLoadBalancerProfile = TypeVar( - "ManagedClusterLoadBalancerProfile") -ResourceReference = TypeVar("ResourceReference") -KubeletConfig = TypeVar("KubeletConfig") -LinuxOSConfig = TypeVar("LinuxOSConfig") -ManagedClusterHTTPProxyConfig = TypeVar("ManagedClusterHTTPProxyConfig") -ContainerServiceNetworkProfile = TypeVar("ContainerServiceNetworkProfile") -ManagedClusterAddonProfile = TypeVar("ManagedClusterAddonProfile") -ManagedClusterOIDCIssuerProfile = TypeVar('ManagedClusterOIDCIssuerProfile') -ManagedClusterSecurityProfileWorkloadIdentity = TypeVar('ManagedClusterSecurityProfileWorkloadIdentity') -ManagedClusterStorageProfile = TypeVar('ManagedClusterStorageProfile') -ManagedClusterStorageProfileDiskCSIDriver = TypeVar('ManagedClusterStorageProfileDiskCSIDriver') -ManagedClusterStorageProfileFileCSIDriver = TypeVar('ManagedClusterStorageProfileFileCSIDriver') -ManagedClusterStorageProfileSnapshotController = TypeVar('ManagedClusterStorageProfileSnapshotController') -ManagedClusterAPIServerAccessProfile = TypeVar('ManagedClusterAPIServerAccessProfile') -Snapshot = TypeVar("Snapshot") -ManagedClusterSnapshot = TypeVar("ManagedClusterSnapshot") -AzureKeyVaultKms = TypeVar('AzureKeyVaultKms') -ManagedClusterIngressProfile = TypeVar('ManagedClusterIngressProfile') -ManagedClusterIngressProfileWebAppRouting = TypeVar('ManagedClusterIngressProfileWebAppRouting') -ManagedClusterWorkloadAutoScalerProfile = 
TypeVar('ManagedClusterWorkloadAutoScalerProfile') -ManagedClusterWorkloadAutoScalerProfileKeda = TypeVar('ManagedClusterWorkloadAutoScalerProfileKeda') - - -# pylint: disable=too-many-instance-attributes,too-few-public-methods -class AKSPreviewModels(AKSModels): - def __init__(self, cmd: AzCommandsLoader, resource_type: ResourceType): - super().__init__(cmd, resource_type=resource_type) - self.__cmd = cmd - self.KubeletConfig = self.__cmd.get_models( - "KubeletConfig", - resource_type=self.resource_type, - operation_group="managed_clusters", - ) - self.LinuxOSConfig = self.__cmd.get_models( - "LinuxOSConfig", - resource_type=self.resource_type, - operation_group="managed_clusters", - ) - self.ManagedClusterHTTPProxyConfig = self.__cmd.get_models( - "ManagedClusterHTTPProxyConfig", - resource_type=self.resource_type, - operation_group="managed_clusters", - ) - self.WindowsGmsaProfile = self.__cmd.get_models( - "WindowsGmsaProfile", - resource_type=self.resource_type, - operation_group="managed_clusters", - ) - self.CreationData = self.__cmd.get_models( - "CreationData", - resource_type=self.resource_type, - operation_group="managed_clusters", - ) - self.ManagedClusterOIDCIssuerProfile = self.__cmd.get_models( - "ManagedClusterOIDCIssuerProfile", - resource_type=self.resource_type, - operation_group="managed_clusters", - ) - self.ManagedClusterSecurityProfileWorkloadIdentity = self.__cmd.get_models( - "ManagedClusterSecurityProfileWorkloadIdentity", - resource_type=self.resource_type, - operation_group="managed_clusters", - ) - self.ManagedClusterSecurityProfile = self.__cmd.get_models( - "ManagedClusterSecurityProfile", - resource_type=self.resource_type, - operation_group="managed_clusters", - ) - self.ManagedClusterIngressProfileWebAppRouting = self.__cmd.get_models( - "ManagedClusterIngressProfileWebAppRouting", - resource_type=self.resource_type, - operation_group="managed_clusters", - ) - self.ManagedClusterIngressProfile = self.__cmd.get_models( - 
"ManagedClusterIngressProfile", - resource_type=self.resource_type, - operation_group="managed_clusters", - ) - self.AzureKeyVaultKms = self.__cmd.get_models( - "AzureKeyVaultKms", - resource_type=self.resource_type, - operation_group="managed_clusters", - ) - self.ManagedClusterStorageProfile = self.__cmd.get_models( - "ManagedClusterStorageProfile", - resource_type=self.resource_type, - operation_group="managed_clusters", - ) - self.ManagedClusterStorageProfileDiskCSIDriver = self.__cmd.get_models( - "ManagedClusterStorageProfileDiskCSIDriver", - resource_type=self.resource_type, - operation_group="managed_clusters", - ) - self.ManagedClusterStorageProfileFileCSIDriver = self.__cmd.get_models( - "ManagedClusterStorageProfileFileCSIDriver", - resource_type=self.resource_type, - operation_group="managed_clusters", - ) - self.ManagedClusterStorageProfileSnapshotController = self.__cmd.get_models( - "ManagedClusterStorageProfileSnapshotController", - resource_type=self.resource_type, - operation_group="managed_clusters", - ) - self.ManagedClusterAPIServerAccessProfile = self.__cmd.get_models( - "ManagedClusterAPIServerAccessProfile", - resource_type=self.resource_type, - operation_group="managed_clusters", - ) - self.ManagedClusterWorkloadAutoScalerProfile = self.__cmd.get_models( - "ManagedClusterWorkloadAutoScalerProfile", - resource_type=self.resource_type, - operation_group="managed_clusters", - ) - self.ManagedClusterWorkloadAutoScalerProfileKeda = self.__cmd.get_models( - "ManagedClusterWorkloadAutoScalerProfileKeda", - resource_type=self.resource_type, - operation_group="managed_clusters", - ) - # holder for nat gateway related models - self.__nat_gateway_models = None - # holder for pod identity related models - self.__pod_identity_models = None - - @property - def nat_gateway_models(self) -> SimpleNamespace: - """Get nat gateway related models. 
- - The models are stored in a SimpleNamespace object, could be accessed by the dot operator like - `nat_gateway_models.ManagedClusterNATGatewayProfile`. - - :return: SimpleNamespace - """ - if self.__nat_gateway_models is None: - nat_gateway_models = {} - nat_gateway_models["ManagedClusterNATGatewayProfile"] = self.__cmd.get_models( - "ManagedClusterNATGatewayProfile", - resource_type=self.resource_type, - operation_group="managed_clusters", - ) - nat_gateway_models["ManagedClusterManagedOutboundIPProfile"] = self.__cmd.get_models( - "ManagedClusterManagedOutboundIPProfile", - resource_type=self.resource_type, - operation_group="managed_clusters", - ) - self.__nat_gateway_models = SimpleNamespace(**nat_gateway_models) - return self.__nat_gateway_models - - @property - def pod_identity_models(self) -> SimpleNamespace: - """Get pod identity related models. - - The models are stored in a SimpleNamespace object, could be accessed by the dot operator like - `pod_identity_models.ManagedClusterPodIdentityProfile`. 
- - :return: SimpleNamespace - """ - if self.__pod_identity_models is None: - pod_identity_models = {} - pod_identity_models["ManagedClusterPodIdentityProfile"] = self.__cmd.get_models( - "ManagedClusterPodIdentityProfile", - resource_type=self.resource_type, - operation_group="managed_clusters", - ) - pod_identity_models["ManagedClusterPodIdentityException"] = self.__cmd.get_models( - "ManagedClusterPodIdentityException", - resource_type=self.resource_type, - operation_group="managed_clusters", - ) - self.__pod_identity_models = SimpleNamespace(**pod_identity_models) - return self.__pod_identity_models - - -# pylint: disable=too-many-public-methods -class AKSPreviewContext(AKSContext): - def __init__( - self, - cmd: AzCliCommand, - raw_parameters: Dict, - models: AKSPreviewModels, - decorator_mode, - ): - super().__init__(cmd, raw_parameters, models, decorator_mode) - - # pylint: disable=no-self-use - def __validate_pod_identity_with_kubenet(self, mc, enable_pod_identity, enable_pod_identity_with_kubenet): - """Helper function to check the validity of serveral pod identity related parameters. - - If network_profile has been set up in `mc`, network_plugin equals to "kubenet" and enable_pod_identity is - specified but enable_pod_identity_with_kubenet is not, raise a RequiredArgumentMissingError. - - :return: None - """ - if ( - mc and - mc.network_profile and - safe_lower(mc.network_profile.network_plugin) == "kubenet" - ): - if enable_pod_identity and not enable_pod_identity_with_kubenet: - raise RequiredArgumentMissingError( - "--enable-pod-identity-with-kubenet is required for enabling pod identity addon " - "when using Kubenet network plugin" - ) - - # pylint: disable=no-self-use - def __validate_gmsa_options( - self, - enable_windows_gmsa, - gmsa_dns_server, - gmsa_root_domain_name, - yes, - ) -> None: - """Helper function to validate gmsa related options. 
- - When enable_windows_gmsa is specified, if both gmsa_dns_server and gmsa_root_domain_name are not assigned and - user does not confirm the operation, a DecoratorEarlyExitException will be raised; if only one of - gmsa_dns_server or gmsa_root_domain_name is assigned, raise a RequiredArgumentMissingError. When - enable_windows_gmsa is not specified, if any of gmsa_dns_server or gmsa_root_domain_name is assigned, raise - a RequiredArgumentMissingError. - - :return: bool - """ - gmsa_dns_server_is_none = gmsa_dns_server is None - gmsa_root_domain_name_is_none = gmsa_root_domain_name is None - if enable_windows_gmsa: - if gmsa_dns_server_is_none == gmsa_root_domain_name_is_none: - if gmsa_dns_server_is_none: - msg = ( - "Please assure that you have set the DNS server in the vnet used by the cluster " - "when not specifying --gmsa-dns-server and --gmsa-root-domain-name" - ) - if not yes and not prompt_y_n(msg, default="n"): - raise DecoratorEarlyExitException() - else: - raise RequiredArgumentMissingError( - "You must set or not set --gmsa-dns-server and --gmsa-root-domain-name at the same time." - ) - else: - if gmsa_dns_server_is_none != gmsa_root_domain_name_is_none: - raise RequiredArgumentMissingError( - "You only can set --gmsa-dns-server and --gmsa-root-domain-name " - "when setting --enable-windows-gmsa." - ) - - # pylint: disable=unused-argument - def _get_vm_set_type(self, read_only: bool = False) -> Union[str, None]: - """Internal function to dynamically obtain the value of vm_set_type according to the context. - - Note: Inherited and extended in aks-preview to add support for the deprecated option --enable-vmss. - - :return: string or None - """ - vm_set_type = super()._get_vm_set_type(read_only) - - # TODO: Remove the below section when we deprecate the --enable-vmss flag, kept for back-compatibility only. 
- # read the original value passed by the command - enable_vmss = self.raw_param.get("enable_vmss") - - if enable_vmss: - if vm_set_type and vm_set_type.lower() != "VirtualMachineScaleSets".lower(): - raise InvalidArgumentValueError( - "--enable-vmss and provided --vm-set-type ({}) are conflicting with each other".format( - vm_set_type - ) - ) - vm_set_type = "VirtualMachineScaleSets" - return vm_set_type - - def get_zones(self) -> Union[List[str], None]: - """Obtain the value of zones. - - Note: Inherited and extended in aks-preview to add support for a different parameter name (node_zones). - - :return: list of strings or None - """ - zones = super().get_zones() - if zones is not None: - return zones - # read the original value passed by the command - return self.raw_param.get("node_zones") - - def get_pod_subnet_id(self) -> Union[str, None]: - """Obtain the value of pod_subnet_id. - - :return: bool - """ - # read the original value passed by the command - pod_subnet_id = self.raw_param.get("pod_subnet_id") - # try to read the property value corresponding to the parameter from the `mc` object - if self.mc and self.mc.agent_pool_profiles: - agent_pool_profile = safe_list_get( - self.mc.agent_pool_profiles, 0, None - ) - if ( - agent_pool_profile and - agent_pool_profile.pod_subnet_id is not None - ): - pod_subnet_id = agent_pool_profile.pod_subnet_id - - # this parameter does not need dynamic completion - # this parameter does not need validation - return pod_subnet_id - - def get_enable_fips_image(self) -> bool: - """Obtain the value of enable_fips_image. 
- - :return: bool - """ - # read the original value passed by the command - enable_fips_image = self.raw_param.get("enable_fips_image") - # try to read the property value corresponding to the parameter from the `mc` object - if self.mc and self.mc.agent_pool_profiles: - agent_pool_profile = safe_list_get( - self.mc.agent_pool_profiles, 0, None - ) - if ( - agent_pool_profile and - agent_pool_profile.enable_fips is not None - ): - enable_fips_image = agent_pool_profile.enable_fips - - # this parameter does not need dynamic completion - # this parameter does not need validation - return enable_fips_image - - def get_workload_runtime(self) -> Union[str, None]: - """Obtain the value of workload_runtime. - - :return: string or None - """ - # read the original value passed by the command - workload_runtime = self.raw_param.get("workload_runtime") - # try to read the property value corresponding to the parameter from the `mc` object - if self.mc and self.mc.agent_pool_profiles: - agent_pool_profile = safe_list_get( - self.mc.agent_pool_profiles, 0, None - ) - if ( - agent_pool_profile and - # backward compatibility - hasattr(agent_pool_profile, "workload_runtime") and - agent_pool_profile.workload_runtime is not None - ): - workload_runtime = agent_pool_profile.workload_runtime - - # this parameter does not need dynamic completion - # this parameter does not need validation - return workload_runtime - - def get_gpu_instance_profile(self) -> Union[str, None]: - """Obtain the value of gpu_instance_profile. 
- - :return: string or None - """ - # read the original value passed by the command - gpu_instance_profile = self.raw_param.get("gpu_instance_profile") - # try to read the property value corresponding to the parameter from the `mc` object - if self.mc and self.mc.agent_pool_profiles: - agent_pool_profile = safe_list_get( - self.mc.agent_pool_profiles, 0, None - ) - if ( - agent_pool_profile and - # backward compatibility - hasattr(agent_pool_profile, "gpu_instance_profile") and - agent_pool_profile.gpu_instance_profile is not None - ): - gpu_instance_profile = agent_pool_profile.gpu_instance_profile - - # this parameter does not need dynamic completion - # this parameter does not need validation - return gpu_instance_profile - - def get_message_of_the_day(self) -> Union[str, None]: - """Obtain the value of message_of_the_day. - - :return: string or None - """ - # read the original value passed by the command - message_of_the_day = None - message_of_the_day_file_path = self.raw_param.get("message_of_the_day") - - if message_of_the_day_file_path: - if not os.path.isfile(message_of_the_day_file_path): - raise InvalidArgumentValueError( - "{} is not valid file, or not accessable.".format( - message_of_the_day_file_path - ) - ) - message_of_the_day = read_file_content( - message_of_the_day_file_path) - message_of_the_day = base64.b64encode( - bytes(message_of_the_day, 'ascii')).decode('ascii') - - # try to read the property value corresponding to the parameter from the `mc` object - if self.mc and self.mc.agent_pool_profiles: - agent_pool_profile = safe_list_get( - self.mc.agent_pool_profiles, 0, None - ) - if ( - agent_pool_profile and - # backward compatibility - hasattr(agent_pool_profile, "message_of_the_day") and - agent_pool_profile.message_of_the_day is not None - ): - message_of_the_day = agent_pool_profile.message_of_the_day - - # this parameter does not need dynamic completion - # this parameter does not need validation - return message_of_the_day - - def 
get_enable_custom_ca_trust(self) -> Union[bool, None]: - """Obtain the value of enable_custom_ca_trust. - - :return: bool or None - """ - # read the original value passed by the command - enable_custom_ca_trust = self.raw_param.get("enable_custom_ca_trust") - - # try to read the property value corresponding to the parameter from the `mc` object - if self.mc and self.mc.agent_pool_profiles: - agent_pool_profile = safe_list_get( - self.mc.agent_pool_profiles, 0, None - ) - if ( - agent_pool_profile and - # backward compatibility - hasattr(agent_pool_profile, "enable_custom_ca_trust") and - agent_pool_profile.enable_custom_ca_trust is not None - ): - enable_custom_ca_trust = agent_pool_profile.enable_custom_ca_trust - - # this parameter does not need dynamic completion - # this parameter does not need validation - return enable_custom_ca_trust - - def get_kubelet_config(self) -> Union[dict, KubeletConfig, None]: - """Obtain the value of kubelet_config. - - :return: dict, KubeletConfig or None - """ - # read the original value passed by the command - kubelet_config = None - kubelet_config_file_path = self.raw_param.get("kubelet_config") - # validate user input - if kubelet_config_file_path: - if not os.path.isfile(kubelet_config_file_path): - raise InvalidArgumentValueError( - "{} is not valid file, or not accessable.".format( - kubelet_config_file_path - ) - ) - kubelet_config = get_file_json(kubelet_config_file_path) - if not isinstance(kubelet_config, dict): - raise InvalidArgumentValueError( - "Error reading kubelet configuration from {}. 
" - "Please see https://aka.ms/CustomNodeConfig for correct format.".format( - kubelet_config_file_path - ) - ) - - # try to read the property value corresponding to the parameter from the `mc` object - if self.mc and self.mc.agent_pool_profiles: - agent_pool_profile = safe_list_get( - self.mc.agent_pool_profiles, 0, None - ) - if ( - agent_pool_profile and - agent_pool_profile.kubelet_config is not None - ): - kubelet_config = agent_pool_profile.kubelet_config - - # this parameter does not need dynamic completion - # this parameter does not need validation - return kubelet_config - - def get_linux_os_config(self) -> Union[dict, LinuxOSConfig, None]: - """Obtain the value of linux_os_config. - - :return: dict, LinuxOSConfig or None - """ - # read the original value passed by the command - linux_os_config = None - linux_os_config_file_path = self.raw_param.get("linux_os_config") - # validate user input - if linux_os_config_file_path: - if not os.path.isfile(linux_os_config_file_path): - raise InvalidArgumentValueError( - "{} is not valid file, or not accessable.".format( - linux_os_config_file_path - ) - ) - linux_os_config = get_file_json(linux_os_config_file_path) - if not isinstance(linux_os_config, dict): - raise InvalidArgumentValueError( - "Error reading Linux OS configuration from {}. 
" - "Please see https://aka.ms/CustomNodeConfig for correct format.".format( - linux_os_config_file_path - ) - ) - - # try to read the property value corresponding to the parameter from the `mc` object - if self.mc and self.mc.agent_pool_profiles: - agent_pool_profile = safe_list_get( - self.mc.agent_pool_profiles, 0, None - ) - if ( - agent_pool_profile and - agent_pool_profile.linux_os_config is not None - ): - linux_os_config = agent_pool_profile.linux_os_config - - # this parameter does not need dynamic completion - # this parameter does not need validation - return linux_os_config - - def get_http_proxy_config(self) -> Union[dict, ManagedClusterHTTPProxyConfig, None]: - """Obtain the value of http_proxy_config. - - :return: dict, ManagedClusterHTTPProxyConfig or None - """ - # read the original value passed by the command - http_proxy_config = None - http_proxy_config_file_path = self.raw_param.get("http_proxy_config") - # validate user input - if http_proxy_config_file_path: - if not os.path.isfile(http_proxy_config_file_path): - raise InvalidArgumentValueError( - "{} is not valid file, or not accessable.".format( - http_proxy_config_file_path - ) - ) - http_proxy_config = get_file_json(http_proxy_config_file_path) - if not isinstance(http_proxy_config, dict): - raise InvalidArgumentValueError( - "Error reading Http Proxy Config from {}. " - "Please see https://aka.ms/HttpProxyConfig for correct format.".format( - http_proxy_config_file_path - ) - ) - - # try to read the property value corresponding to the parameter from the `mc` object - if self.decorator_mode == DecoratorMode.CREATE: - if self.mc and self.mc.http_proxy_config is not None: - http_proxy_config = self.mc.http_proxy_config - - # this parameter does not need dynamic completion - # this parameter does not need validation - return http_proxy_config - - def get_node_resource_group(self) -> Union[str, None]: - """Obtain the value of node_resource_group. 
- - :return: string or None - """ - # read the original value passed by the command - node_resource_group = self.raw_param.get("node_resource_group") - # try to read the property value corresponding to the parameter from the `mc` object - if self.mc and self.mc.node_resource_group is not None: - node_resource_group = self.mc.node_resource_group - - # this parameter does not need dynamic completion - # this parameter does not need validation - return node_resource_group - - def get_nat_gateway_managed_outbound_ip_count(self) -> Union[int, None]: - """Obtain the value of nat_gateway_managed_outbound_ip_count. - - Note: SDK provides default value 1 and performs the following validation {'maximum': 16, 'minimum': 1}. - - :return: int or None - """ - # read the original value passed by the command - nat_gateway_managed_outbound_ip_count = self.raw_param.get( - "nat_gateway_managed_outbound_ip_count") - # In create mode, try to read the property value corresponding to the parameter from the `mc` object. - if self.decorator_mode == DecoratorMode.CREATE: - if ( - self.mc and - self.mc.network_profile and - self.mc.network_profile.nat_gateway_profile and - self.mc.network_profile.nat_gateway_profile.managed_outbound_ip_profile and - self.mc.network_profile.nat_gateway_profile.managed_outbound_ip_profile.count is not None - ): - nat_gateway_managed_outbound_ip_count = ( - self.mc.network_profile.nat_gateway_profile.managed_outbound_ip_profile.count - ) - - # this parameter does not need dynamic completion - # this parameter does not need validation - return nat_gateway_managed_outbound_ip_count - - def get_nat_gateway_idle_timeout(self) -> Union[int, None]: - """Obtain the value of nat_gateway_idle_timeout. - - Note: SDK provides default value 4 and performs the following validation {'maximum': 120, 'minimum': 4}. 
- - :return: int or None - """ - # read the original value passed by the command - nat_gateway_idle_timeout = self.raw_param.get( - "nat_gateway_idle_timeout") - # In create mode, try to read the property value corresponding to the parameter from the `mc` object. - if self.decorator_mode == DecoratorMode.CREATE: - if ( - self.mc and - self.mc.network_profile and - self.mc.network_profile.nat_gateway_profile and - self.mc.network_profile.nat_gateway_profile.idle_timeout_in_minutes is not None - ): - nat_gateway_idle_timeout = ( - self.mc.network_profile.nat_gateway_profile.idle_timeout_in_minutes - ) - - # this parameter does not need dynamic completion - # this parameter does not need validation - return nat_gateway_idle_timeout - - def _get_enable_pod_security_policy(self, enable_validation: bool = False) -> bool: - """Internal function to obtain the value of enable_pod_security_policy. - - This function supports the option of enable_validation. When enabled, if both enable_pod_security_policy and - disable_pod_security_policy are specified, raise a MutuallyExclusiveArgumentError. - - :return: bool - """ - # read the original value passed by the command - enable_pod_security_policy = self.raw_param.get( - "enable_pod_security_policy") - # In create mode, try to read the property value corresponding to the parameter from the `mc` object. - if self.decorator_mode == DecoratorMode.CREATE: - if ( - self.mc and - self.mc.enable_pod_security_policy is not None - ): - enable_pod_security_policy = self.mc.enable_pod_security_policy - - # this parameter does not need dynamic completion - # validation - if enable_validation: - if enable_pod_security_policy and self._get_disable_pod_security_policy(enable_validation=False): - raise MutuallyExclusiveArgumentError( - "Cannot specify --enable-pod-security-policy and " - "--disable-pod-security-policy at the same time." 
- ) - return enable_pod_security_policy - - def get_enable_pod_security_policy(self) -> bool: - """Obtain the value of enable_pod_security_policy. - - This function will verify the parameter by default. If both enable_pod_security_policy and - disable_pod_security_policy are specified, raise a MutuallyExclusiveArgumentError. - - :return: bool - """ - return self._get_enable_pod_security_policy(enable_validation=True) - - def _get_disable_pod_security_policy(self, enable_validation: bool = False) -> bool: - """Internal function to obtain the value of disable_pod_security_policy. - - This function supports the option of enable_validation. When enabled, if both enable_pod_security_policy and - disable_pod_security_policy are specified, raise a MutuallyExclusiveArgumentError. - - :return: bool - """ - # read the original value passed by the command - disable_pod_security_policy = self.raw_param.get( - "disable_pod_security_policy") - # We do not support this option in create mode, therefore we do not read the value from `mc`. - - # this parameter does not need dynamic completion - # validation - if enable_validation: - if disable_pod_security_policy and self._get_enable_pod_security_policy(enable_validation=False): - raise MutuallyExclusiveArgumentError( - "Cannot specify --enable-pod-security-policy and " - "--disable-pod-security-policy at the same time." - ) - return disable_pod_security_policy - - def get_disable_pod_security_policy(self) -> bool: - """Obtain the value of disable_pod_security_policy. - - This function will verify the parameter by default. If both enable_pod_security_policy and - disable_pod_security_policy are specified, raise a MutuallyExclusiveArgumentError. 
    # pylint: disable=unused-argument
    def _get_enable_managed_identity(
        self, enable_validation: bool = False, read_only: bool = False
    ) -> bool:
        """Internal function to obtain the value of enable_managed_identity.

        Note: Inherited and extended in aks-preview to perform additional validation.

        This function supports the option of enable_validation. When enabled, if enable_managed_identity is not
        specified but enable_pod_identity is, raise a RequiredArgumentMissingError.

        :return: bool
        """
        enable_managed_identity = super()._get_enable_managed_identity(
            enable_validation, read_only)
        # additional validation: pod identity requires managed identity (create mode checks the
        # requested flag, update mode checks the existing cluster's identity)
        if enable_validation:
            if self.decorator_mode == DecoratorMode.CREATE:
                if not enable_managed_identity and self._get_enable_pod_identity(enable_validation=False):
                    raise RequiredArgumentMissingError(
                        "--enable-pod-identity can only be specified when --enable-managed-identity is specified"
                    )
            elif self.decorator_mode == DecoratorMode.UPDATE:
                if not check_is_msi_cluster(self.mc) and self._get_enable_pod_identity(enable_validation=False):
                    raise RequiredArgumentMissingError(
                        "--enable-pod-identity can only be specified for cluster enabled managed identity"
                    )
        return enable_managed_identity

    def _get_enable_pod_identity(self, enable_validation: bool = False) -> bool:
        """Internal function to obtain the value of enable_pod_identity.

        This function supports the option of enable_validation. When enabled, if enable_managed_identity is not
        specified but enable_pod_identity is, raise a RequiredArgumentMissingError. Will also call function
        "__validate_pod_identity_with_kubenet" for verification. In update mode, if both
        enable_pod_identity and disable_pod_identity are specified, raise a MutuallyExclusiveArgumentError.

        :return: bool
        """
        # read the original value passed by the command
        enable_pod_identity = self.raw_param.get("enable_pod_identity")
        # In create mode, try to read the property value corresponding to the parameter from the `mc` object.
        if self.decorator_mode == DecoratorMode.CREATE:
            if (
                self.mc and
                self.mc.pod_identity_profile and
                self.mc.pod_identity_profile.enabled is not None
            ):
                enable_pod_identity = self.mc.pod_identity_profile.enabled

        # this parameter does not need dynamic completion
        # validation
        if enable_validation:
            if self.decorator_mode == DecoratorMode.CREATE:
                if enable_pod_identity and not self._get_enable_managed_identity(enable_validation=False):
                    raise RequiredArgumentMissingError(
                        "--enable-pod-identity can only be specified when --enable-managed-identity is specified"
                    )
                # validate pod identity with kubenet plugin
                self.__validate_pod_identity_with_kubenet(
                    self.mc,
                    enable_pod_identity,
                    self._get_enable_pod_identity_with_kubenet(
                        enable_validation=False
                    ),
                )
            elif self.decorator_mode == DecoratorMode.UPDATE:
                if enable_pod_identity:
                    if not check_is_msi_cluster(self.mc):
                        raise RequiredArgumentMissingError(
                            "--enable-pod-identity can only be specified for cluster enabled managed identity"
                        )
                    if self._get_disable_pod_identity(enable_validation=False):
                        raise MutuallyExclusiveArgumentError(
                            "Cannot specify --enable-pod-identity and "
                            "--disable-pod-identity at the same time."
                        )
        return enable_pod_identity

    def get_enable_pod_identity(self) -> bool:
        """Obtain the value of enable_pod_identity.

        This function will verify the parameter by default. If enable_managed_identity is not specified but
        enable_pod_identity is, raise a RequiredArgumentMissingError. Will also call function
        "__validate_pod_identity_with_kubenet" for verification. In update mode, if both enable_pod_identity and
        disable_pod_identity are specified, raise a MutuallyExclusiveArgumentError.

        :return: bool
        """

        return self._get_enable_pod_identity(enable_validation=True)

    def _get_disable_pod_identity(self, enable_validation: bool = False) -> bool:
        """Internal function to obtain the value of disable_pod_identity.

        This function supports the option of enable_validation. When enabled, in update mode, if both
        enable_pod_identity and disable_pod_identity are specified, raise a MutuallyExclusiveArgumentError.

        :return: bool
        """
        # read the original value passed by the command
        disable_pod_identity = self.raw_param.get("disable_pod_identity")
        # We do not support this option in create mode, therefore we do not read the value from `mc`.

        # this parameter does not need dynamic completion
        # validation
        if enable_validation:
            if self.decorator_mode == DecoratorMode.UPDATE:
                if disable_pod_identity and self._get_enable_pod_identity(enable_validation=False):
                    raise MutuallyExclusiveArgumentError(
                        "Cannot specify --enable-pod-identity and "
                        "--disable-pod-identity at the same time."
                    )
        return disable_pod_identity

    def get_disable_pod_identity(self) -> bool:
        """Obtain the value of disable_pod_identity.

        This function will verify the parameter by default. When enabled, in update mode, if both
        enable_pod_identity and disable_pod_identity are specified, raise a MutuallyExclusiveArgumentError.

        :return: bool
        """

        return self._get_disable_pod_identity(enable_validation=True)

    def _get_enable_pod_identity_with_kubenet(self, enable_validation: bool = False) -> bool:
        """Internal function to obtain the value of enable_pod_identity_with_kubenet.

        This function supports the option of enable_validation. When enabled, will call function
        "__validate_pod_identity_with_kubenet" for verification.

        :return: bool
        """
        # read the original value passed by the command
        enable_pod_identity_with_kubenet = self.raw_param.get(
            "enable_pod_identity_with_kubenet")
        # In create mode, try to read the property value corresponding to the parameter from the `mc` object.
        if self.decorator_mode == DecoratorMode.CREATE:
            if (
                self.mc and
                self.mc.pod_identity_profile and
                self.mc.pod_identity_profile.allow_network_plugin_kubenet is not None
            ):
                enable_pod_identity_with_kubenet = self.mc.pod_identity_profile.allow_network_plugin_kubenet

        # this parameter does not need dynamic completion
        # validation
        if enable_validation:
            if self.decorator_mode == DecoratorMode.CREATE:
                self.__validate_pod_identity_with_kubenet(
                    self.mc,
                    self._get_enable_pod_identity(enable_validation=False),
                    enable_pod_identity_with_kubenet,
                )
        return enable_pod_identity_with_kubenet

    def get_enable_pod_identity_with_kubenet(self) -> bool:
        """Obtain the value of enable_pod_identity_with_kubenet.

        This function will verify the parameter by default. Will call function "__validate_pod_identity_with_kubenet"
        for verification.

        :return: bool
        """
        return self._get_enable_pod_identity_with_kubenet(enable_validation=True)

    def get_addon_consts(self) -> Dict[str, str]:
        """Helper function to obtain the constants used by addons.

        Note: Inherited and extended in aks-preview to replace and add a few values.

        Note: This is not a parameter of aks commands.

        :return: dict
        """
        # local import to avoid a hard module-level dependency on the preview consts
        from azext_aks_preview._consts import (
            ADDONS, CONST_GITOPS_ADDON_NAME,
            CONST_MONITORING_USING_AAD_MSI_AUTH)

        addon_consts = super().get_addon_consts()
        addon_consts["ADDONS"] = ADDONS
        addon_consts["CONST_GITOPS_ADDON_NAME"] = CONST_GITOPS_ADDON_NAME
        addon_consts[
            "CONST_MONITORING_USING_AAD_MSI_AUTH"
        ] = CONST_MONITORING_USING_AAD_MSI_AUTH
        return addon_consts

    def get_appgw_subnet_prefix(self) -> Union[str, None]:
        """Obtain the value of appgw_subnet_prefix.

        [Deprecated] Note: this parameter is deprecated and replaced by appgw_subnet_cidr.

        :return: string or None
        """
        # determine the value of constants
        addon_consts = self.get_addon_consts()
        CONST_INGRESS_APPGW_ADDON_NAME = addon_consts.get(
            "CONST_INGRESS_APPGW_ADDON_NAME")
        CONST_INGRESS_APPGW_SUBNET_CIDR = addon_consts.get(
            "CONST_INGRESS_APPGW_SUBNET_CIDR")

        # read the original value passed by the command
        appgw_subnet_prefix = self.raw_param.get("appgw_subnet_prefix")
        # try to read the property value corresponding to the parameter from the `mc` object
        if (
            self.mc and
            self.mc.addon_profiles and
            CONST_INGRESS_APPGW_ADDON_NAME in self.mc.addon_profiles and
            self.mc.addon_profiles.get(
                CONST_INGRESS_APPGW_ADDON_NAME
            ).config.get(CONST_INGRESS_APPGW_SUBNET_CIDR) is not None
        ):
            appgw_subnet_prefix = self.mc.addon_profiles.get(
                CONST_INGRESS_APPGW_ADDON_NAME
            ).config.get(CONST_INGRESS_APPGW_SUBNET_CIDR)

        # this parameter does not need dynamic completion
        # this parameter does not need validation
        return appgw_subnet_prefix
    def get_enable_msi_auth_for_monitoring(self) -> Union[bool, None]:
        """Obtain the value of enable_msi_auth_for_monitoring.

        Note: The arg type of this parameter supports three states (True, False or None), but the corresponding default
        value in entry function is not None.

        :return: bool or None
        """
        # determine the value of constants
        addon_consts = self.get_addon_consts()
        CONST_MONITORING_ADDON_NAME = addon_consts.get(
            "CONST_MONITORING_ADDON_NAME")
        CONST_MONITORING_USING_AAD_MSI_AUTH = addon_consts.get(
            "CONST_MONITORING_USING_AAD_MSI_AUTH")

        # read the original value passed by the command
        enable_msi_auth_for_monitoring = self.raw_param.get(
            "enable_msi_auth_for_monitoring")
        # try to read the property value corresponding to the parameter from the `mc` object
        if (
            self.mc and
            self.mc.addon_profiles and
            CONST_MONITORING_ADDON_NAME in self.mc.addon_profiles and
            self.mc.addon_profiles.get(
                CONST_MONITORING_ADDON_NAME
            ).config.get(CONST_MONITORING_USING_AAD_MSI_AUTH) is not None
        ):
            enable_msi_auth_for_monitoring = self.mc.addon_profiles.get(
                CONST_MONITORING_ADDON_NAME
            ).config.get(CONST_MONITORING_USING_AAD_MSI_AUTH)

        # this parameter does not need dynamic completion
        # this parameter does not need validation
        return enable_msi_auth_for_monitoring

    def get_no_wait(self) -> bool:
        """Obtain the value of no_wait.

        Note: Inherited and extended in aks-preview to replace the set value when enable_msi_auth_for_monitoring is
        specified.

        Note: no_wait will not be decorated into the `mc` object.

        :return: bool
        """
        no_wait = super().get_no_wait()

        # msi auth for the monitoring addon needs the cluster to exist before role assignment,
        # so a user-supplied --no-wait is overridden here
        if self.get_intermediate("monitoring") and self.get_enable_msi_auth_for_monitoring():
            logger.warning(
                "Enabling msi auth for monitoring addon requires waiting for cluster creation to complete")
            if no_wait:
                logger.warning("The set option '--no-wait' has been ignored")
                no_wait = False
        return no_wait

    # TODO: may remove this function after the fix for the internal function gets merged and released
    # pylint: disable=unused-argument
    def _get_workspace_resource_id(
        self, enable_validation: bool = False, read_only: bool = False
    ) -> Union[str, None]:  # pragma: no cover
        """Internal function to dynamically obtain the value of workspace_resource_id according to the context.

        Note: Overwritten in aks-preview to replace the internal function.

        When workspace_resource_id is not assigned, dynamic completion will be triggered. Function
        "ensure_default_log_analytics_workspace_for_monitoring" will be called to create a workspace with
        subscription_id and resource_group_name, which internally used ResourceManagementClient to send
        the request.

        This function supports the option of enable_validation. When enabled, it will check if workspace_resource_id is
        assigned but 'monitoring' is not specified in enable_addons, if so, raise a RequiredArgumentMissingError.
        This function supports the option of read_only. When enabled, it will skip dynamic completion and validation.

        :return: string or None
        """
        # determine the value of constants
        addon_consts = self.get_addon_consts()
        CONST_MONITORING_ADDON_NAME = addon_consts.get(
            "CONST_MONITORING_ADDON_NAME")
        CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID = addon_consts.get(
            "CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID"
        )

        # read the original value passed by the command
        workspace_resource_id = self.raw_param.get("workspace_resource_id")
        # try to read the property value corresponding to the parameter from the `mc` object
        read_from_mc = False
        if (
            self.mc and
            self.mc.addon_profiles and
            CONST_MONITORING_ADDON_NAME in self.mc.addon_profiles and
            self.mc.addon_profiles.get(
                CONST_MONITORING_ADDON_NAME
            ).config.get(CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID) is not None
        ):
            workspace_resource_id = self.mc.addon_profiles.get(
                CONST_MONITORING_ADDON_NAME
            ).config.get(CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID)
            read_from_mc = True

        # skip dynamic completion & validation if option read_only is specified
        if read_only:
            return workspace_resource_id

        # dynamic completion
        if not read_from_mc:
            if workspace_resource_id is None:
                # use default workspace if exists else create default workspace
                workspace_resource_id = (
                    ensure_default_log_analytics_workspace_for_monitoring(
                        self.cmd,
                        self.get_subscription_id(),
                        self.get_resource_group_name(),
                    )
                )
            # normalize
            workspace_resource_id = "/" + workspace_resource_id.strip(" /")

        # validation
        if enable_validation:
            enable_addons = self._get_enable_addons(enable_validation=False)
            if workspace_resource_id and "monitoring" not in enable_addons:
                raise RequiredArgumentMissingError(
                    '"--workspace-resource-id" requires "--enable-addons monitoring".')

        # this parameter does not need validation
        return workspace_resource_id

    def get_pod_cidrs_and_service_cidrs_and_ip_families(self) -> Tuple[
        Union[List[str], None],
        Union[List[str], None],
        Union[List[str], None],
    ]:
        """Obtain the values of pod_cidrs, service_cidrs and ip_families as a tuple.

        :return: a tuple of three elements, each List[str] or None
        """
        return self.get_pod_cidrs(), self.get_service_cidrs(), self.get_ip_families()

    def get_ip_families(self) -> Union[List[str], None]:
        """IPFamilies used for the cluster network.

        :return: List[str] or None
        """
        return self._get_list_attr('ip_families')

    def get_pod_cidrs(self) -> Union[List[str], None]:
        """Obtain the CIDR ranges used for pod subnets.

        :return: List[str] or None
        """
        return self._get_list_attr('pod_cidrs')

    def get_service_cidrs(self) -> Union[List[str], None]:
        """Obtain the CIDR ranges for the service subnet.

        :return: List[str] or None
        """
        return self._get_list_attr('service_cidrs')

    def _get_list_attr(self, param_key) -> Union[List[str], None]:
        """Read a comma-separated raw parameter and split it into a list.

        An empty string yields an empty list; an unset parameter yields None.

        :return: List[str] or None
        """
        param = self.raw_param.get(param_key)

        if param is not None:
            return param.split(',') if param else []

        return None

    def get_load_balancer_managed_outbound_ip_count(self) -> Union[int, None]:
        """Obtain the value of load_balancer_managed_outbound_ip_count.

        Note: Overwritten in aks-preview to preserve value from `mc` in update mode under certain circumstance.

        Note: SDK provides default value 1 and performs the following validation {'maximum': 100, 'minimum': 1}.

        :return: int or None
        """
        # read the original value passed by the command
        load_balancer_managed_outbound_ip_count = self.raw_param.get(
            "load_balancer_managed_outbound_ip_count"
        )
        # In create mode, try to read the property value corresponding to the parameter from the `mc` object.
        if self.decorator_mode == DecoratorMode.CREATE:
            if (
                self.mc and
                self.mc.network_profile and
                self.mc.network_profile.load_balancer_profile and
                self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps and
                self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count is not None
            ):
                load_balancer_managed_outbound_ip_count = (
                    self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count
                )
        elif self.decorator_mode == DecoratorMode.UPDATE:
            # keep the existing count only when the user supplied no outbound-ip options at all
            if (
                not self.get_load_balancer_outbound_ips() and
                not self.get_load_balancer_outbound_ip_prefixes() and
                load_balancer_managed_outbound_ip_count is None
            ):
                if (
                    self.mc and
                    self.mc.network_profile and
                    self.mc.network_profile.load_balancer_profile and
                    self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps and
                    self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count is not None
                ):
                    load_balancer_managed_outbound_ip_count = (
                        self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count
                    )

        # this parameter does not need dynamic completion
        # this parameter does not need validation
        return load_balancer_managed_outbound_ip_count
    def get_load_balancer_managed_outbound_ipv6_count(self) -> Union[int, None]:
        """Obtain the expected count of IPv6 managed outbound IPs.

        Note: SDK provides default value 0 and performs the following validation {'maximum': 100, 'minimum': 0}.

        :return: int or None
        """
        count_ipv6 = self.raw_param.get(
            'load_balancer_managed_outbound_ipv6_count')

        if self.decorator_mode == DecoratorMode.CREATE:
            if (
                self.mc and
                self.mc.network_profile and
                self.mc.network_profile.load_balancer_profile and
                self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps and
                self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count_ipv6 is not None
            ):
                count_ipv6 = (
                    self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count_ipv6
                )
        elif self.decorator_mode == DecoratorMode.UPDATE:
            # keep the existing count only when the user supplied no outbound-ip options at all
            if (
                not self.get_load_balancer_outbound_ips() and
                not self.get_load_balancer_outbound_ip_prefixes() and
                count_ipv6 is None
            ):
                if (
                    self.mc and
                    self.mc.network_profile and
                    self.mc.network_profile.load_balancer_profile and
                    self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps and
                    self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count_ipv6 is not None
                ):
                    count_ipv6 = (
                        self.mc.network_profile.load_balancer_profile.managed_outbound_i_ps.count_ipv6
                    )

        return count_ipv6

    # pylint: disable=unused-argument
    def _get_outbound_type(
        self,
        enable_validation: bool = False,
        read_only: bool = False,
        load_balancer_profile: ManagedClusterLoadBalancerProfile = None,
    ) -> Union[str, None]:
        """Internal function to dynamically obtain the value of outbound_type according to the context.

        Note: Overwritten in aks-preview to add support for the newly added nat related constants.

        Note: All the external parameters involved in the validation are not verified in their own getters.

        When outbound_type is not assigned, dynamic completion will be triggered. By default, the value is set to
        CONST_OUTBOUND_TYPE_LOAD_BALANCER.

        This function supports the option of enable_validation. When enabled, if the value of outbound_type is one of
        CONST_OUTBOUND_TYPE_MANAGED_NAT_GATEWAY, CONST_OUTBOUND_TYPE_USER_ASSIGNED_NAT_GATEWAY or
        CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING, the following checks will be performed. If load_balancer_sku is set
        to basic, an InvalidArgumentValueError will be raised. If the value of outbound_type is not
        CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING and vnet_subnet_id is not assigned, a RequiredArgumentMissingError
        will be raised. If the value of outbound_type equals to CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING and
        any of load_balancer_managed_outbound_ip_count, load_balancer_outbound_ips or load_balancer_outbound_ip_prefixes
        is assigned, a MutuallyExclusiveArgumentError will be raised.
        This function supports the option of read_only. When enabled, it will skip dynamic completion and validation.
        This function supports the option of load_balancer_profile, if provided, when verifying loadbalancer-related
        parameters, the value in load_balancer_profile will be used for validation.

        :return: string or None
        """
        # read the original value passed by the command
        outbound_type = self.raw_param.get("outbound_type")
        # try to read the property value corresponding to the parameter from the `mc` object
        read_from_mc = False
        if (
            self.mc and
            self.mc.network_profile and
            self.mc.network_profile.outbound_type is not None
        ):
            outbound_type = self.mc.network_profile.outbound_type
            read_from_mc = True

        # skip dynamic completion & validation if option read_only is specified
        if read_only:
            return outbound_type

        # dynamic completion: anything other than the recognized NAT/UDR values falls back to loadBalancer
        if (
            not read_from_mc and
            outbound_type != CONST_OUTBOUND_TYPE_MANAGED_NAT_GATEWAY and
            outbound_type != CONST_OUTBOUND_TYPE_USER_ASSIGNED_NAT_GATEWAY and
            outbound_type != CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING
        ):
            outbound_type = CONST_OUTBOUND_TYPE_LOAD_BALANCER

        # validation
        # Note: The parameters involved in the validation are not verified in their own getters.
        if enable_validation:
            if outbound_type in [
                CONST_OUTBOUND_TYPE_MANAGED_NAT_GATEWAY,
                CONST_OUTBOUND_TYPE_USER_ASSIGNED_NAT_GATEWAY,
                CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING,
            ]:
                # Should not enable read_only for get_load_balancer_sku, since its default value is None, and it has
                # not been decorated into the mc object at this time, only the value after dynamic completion is
                # meaningful here.
                if safe_lower(self._get_load_balancer_sku(enable_validation=False)) == "basic":
                    raise InvalidArgumentValueError(
                        "{} doesn't support basic load balancer sku".format(outbound_type))
                if outbound_type == CONST_OUTBOUND_TYPE_USER_ASSIGNED_NAT_GATEWAY:
                    if self.get_vnet_subnet_id() in ["", None]:
                        raise RequiredArgumentMissingError(
                            "--vnet-subnet-id must be specified for userAssignedNATGateway and it must "
                            "be pre-associated with a NAT gateway with outbound public IPs or IP prefixes"
                        )
                if outbound_type == CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING:
                    if self.get_vnet_subnet_id() in ["", None]:
                        raise RequiredArgumentMissingError(
                            "--vnet-subnet-id must be specified for userDefinedRouting and it must "
                            "be pre-configured with a route table with egress rules"
                        )
                    # prefer the caller-supplied load_balancer_profile for the check, fall back to raw params
                    if load_balancer_profile:
                        if (
                            load_balancer_profile.managed_outbound_i_ps or
                            load_balancer_profile.outbound_i_ps or
                            load_balancer_profile.outbound_ip_prefixes
                        ):
                            raise MutuallyExclusiveArgumentError(
                                "userDefinedRouting doesn't support customizing "
                                "a standard load balancer with IP addresses"
                            )
                    else:
                        if (
                            self.get_load_balancer_managed_outbound_ip_count() or
                            self.get_load_balancer_outbound_ips() or
                            self.get_load_balancer_outbound_ip_prefixes()
                        ):
                            raise MutuallyExclusiveArgumentError(
                                "userDefinedRouting doesn't support customizing "
                                "a standard load balancer with IP addresses"
                            )
        return outbound_type

    def _get_enable_windows_gmsa(self, enable_validation: bool = False) -> bool:
        """Internal function to obtain the value of enable_windows_gmsa.

        This function supports the option of enable_validation. Please refer to function __validate_gmsa_options for
        details of validation.

        :return: bool
        """
        # read the original value passed by the command
        enable_windows_gmsa = self.raw_param.get("enable_windows_gmsa")
        # In create mode, try to read the property value corresponding to the parameter from the `mc` object.
        if self.decorator_mode == DecoratorMode.CREATE:
            if (
                self.mc and
                self.mc.windows_profile and
                # backward compatibility
                hasattr(self.mc.windows_profile, "gmsa_profile") and
                self.mc.windows_profile.gmsa_profile and
                self.mc.windows_profile.gmsa_profile.enabled is not None
            ):
                enable_windows_gmsa = self.mc.windows_profile.gmsa_profile.enabled

        # this parameter does not need dynamic completion
        # validation
        if enable_validation:
            (
                gmsa_dns_server,
                gmsa_root_domain_name,
            ) = self._get_gmsa_dns_server_and_root_domain_name(
                enable_validation=False
            )
            self.__validate_gmsa_options(
                enable_windows_gmsa, gmsa_dns_server, gmsa_root_domain_name, self.get_yes()
            )
        return enable_windows_gmsa
    def get_enable_windows_gmsa(self) -> bool:
        """Obtain the value of enable_windows_gmsa.

        This function will verify the parameter by default. When enable_windows_gmsa is specified, if both
        gmsa_dns_server and gmsa_root_domain_name are not assigned and user does not confirm the operation,
        a DecoratorEarlyExitException will be raised; if only one of gmsa_dns_server or gmsa_root_domain_name is
        assigned, raise a RequiredArgumentMissingError. When enable_windows_gmsa is not specified, if any of
        gmsa_dns_server or gmsa_root_domain_name is assigned, raise a RequiredArgumentMissingError.

        :return: bool
        """
        return self._get_enable_windows_gmsa(enable_validation=True)

    def _get_gmsa_dns_server_and_root_domain_name(self, enable_validation: bool = False):
        """Internal function to obtain the values of gmsa_dns_server and gmsa_root_domain_name.

        This function supports the option of enable_validation. Please refer to function __validate_gmsa_options for
        details of validation.

        :return: a tuple containing two elements: gmsa_dns_server of string type or None and gmsa_root_domain_name of
        string type or None
        """
        # gmsa_dns_server
        # read the original value passed by the command
        gmsa_dns_server = self.raw_param.get("gmsa_dns_server")
        # In create mode, try to read the property value corresponding to the parameter from the `mc` object.
        gmsa_dns_read_from_mc = False
        if self.decorator_mode == DecoratorMode.CREATE:
            if (
                self.mc and
                self.mc.windows_profile and
                # backward compatibility
                hasattr(self.mc.windows_profile, "gmsa_profile") and
                self.mc.windows_profile.gmsa_profile and
                self.mc.windows_profile.gmsa_profile.dns_server is not None
            ):
                gmsa_dns_server = self.mc.windows_profile.gmsa_profile.dns_server
                gmsa_dns_read_from_mc = True

        # gmsa_root_domain_name
        # read the original value passed by the command
        gmsa_root_domain_name = self.raw_param.get("gmsa_root_domain_name")
        # In create mode, try to read the property value corresponding to the parameter from the `mc` object.
        gmsa_root_read_from_mc = False
        if self.decorator_mode == DecoratorMode.CREATE:
            if (
                self.mc and
                self.mc.windows_profile and
                # backward compatibility
                hasattr(self.mc.windows_profile, "gmsa_profile") and
                self.mc.windows_profile.gmsa_profile and
                self.mc.windows_profile.gmsa_profile.root_domain_name is not None
            ):
                gmsa_root_domain_name = self.mc.windows_profile.gmsa_profile.root_domain_name
                gmsa_root_read_from_mc = True

        # consistent check: the two values must both (or neither) come from the `mc` object
        if gmsa_dns_read_from_mc != gmsa_root_read_from_mc:
            raise CLIInternalError(
                "Inconsistent state detected, one of gmsa_dns_server and gmsa_root_domain_name "
                "is read from the `mc` object."
            )

        # this parameter does not need dynamic completion
        # validation
        if enable_validation:
            self.__validate_gmsa_options(
                self._get_enable_windows_gmsa(enable_validation=False),
                gmsa_dns_server,
                gmsa_root_domain_name,
                self.get_yes(),
            )
        return gmsa_dns_server, gmsa_root_domain_name

    def get_gmsa_dns_server_and_root_domain_name(self) -> Tuple[Union[str, None], Union[str, None]]:
        """Obtain the values of gmsa_dns_server and gmsa_root_domain_name.

        This function will verify the parameter by default. When enable_windows_gmsa is specified, if both
        gmsa_dns_server and gmsa_root_domain_name are not assigned and user does not confirm the operation,
        a DecoratorEarlyExitException will be raised; if only one of gmsa_dns_server or gmsa_root_domain_name is
        assigned, raise a RequiredArgumentMissingError. When enable_windows_gmsa is not specified, if any of
        gmsa_dns_server or gmsa_root_domain_name is assigned, raise a RequiredArgumentMissingError.

        :return: a tuple containing two elements: gmsa_dns_server of string type or None and gmsa_root_domain_name of
        string type or None
        """
        return self._get_gmsa_dns_server_and_root_domain_name(enable_validation=True)

    def get_snapshot_id(self) -> Union[str, None]:
        """Obtain the values of snapshot_id.

        :return: string or None
        """
        # read the original value passed by the command
        snapshot_id = self.raw_param.get("snapshot_id")
        # try to read the property value corresponding to the parameter from the `mc` object
        if self.mc and self.mc.agent_pool_profiles:
            agent_pool_profile = safe_list_get(
                self.mc.agent_pool_profiles, 0, None
            )
            if (
                agent_pool_profile and
                agent_pool_profile.creation_data and
                agent_pool_profile.creation_data.source_resource_id is not None
            ):
                snapshot_id = (
                    agent_pool_profile.creation_data.source_resource_id
                )

        # this parameter does not need dynamic completion
        # this parameter does not need validation
        return snapshot_id

    def get_snapshot(self) -> Union[Snapshot, None]:
        """Helper function to retrieve the Snapshot object corresponding to a snapshot id.

        This function will store an intermediate "snapshot" to avoid sending the same request multiple times.

        Function "_get_snapshot" will be called to retrieve the Snapshot object corresponding to a snapshot id, which
        internally used the snapshot client (snapshots operations belonging to container service client) to send
        the request.

        :return: Snapshot or None
        """
        # try to read from intermediates
        snapshot = self.get_intermediate("snapshot")
        if snapshot:
            return snapshot

        snapshot_id = self.get_snapshot_id()
        if snapshot_id:
            snapshot = _get_snapshot(self.cmd.cli_ctx, snapshot_id)
            # cache the result so repeated calls do not re-issue the request
            self.set_intermediate("snapshot", snapshot, overwrite_exists=True)
        return snapshot

    def get_cluster_snapshot_id(self) -> Union[str, None]:
        """Obtain the values of cluster_snapshot_id.

        :return: string or None
        """
        # read the original value passed by the command
        snapshot_id = self.raw_param.get("cluster_snapshot_id")
        # try to read the property value corresponding to the parameter from the `mc` object
        if (
            self.mc and
            self.mc.creation_data and
            self.mc.creation_data.source_resource_id is not None
        ):
            snapshot_id = (
                self.mc.creation_data.source_resource_id
            )

        # this parameter does not need dynamic completion
        # this parameter does not need validation
        return snapshot_id
- - :return: string or None - """ - # read the original value passed by the command - snapshot_id = self.raw_param.get("cluster_snapshot_id") - # try to read the property value corresponding to the parameter from the `mc` object - if ( - self.mc and - self.mc.creation_data and - self.mc.creation_data.source_resource_id is not None - ): - snapshot_id = ( - self.mc.creation_data.source_resource_id - ) - - # this parameter does not need dynamic completion - # this parameter does not need validation - return snapshot_id - - def get_cluster_snapshot(self) -> Union[ManagedClusterSnapshot, None]: - """Helper function to retrieve the ManagedClusterSnapshot object corresponding to a cluster snapshot id. - - This fuction will store an intermediate "managedclustersnapshot" to avoid sending the same request multiple times. - - Function "_get_cluster_snapshot" will be called to retrieve the ManagedClusterSnapshot object corresponding to a cluster snapshot id, which - internally used the managedclustersnapshot client (managedclustersnapshots operations belonging to container service client) to send - the request. 
- - :return: ManagedClusterSnapshot or None - """ - # try to read from intermediates - snapshot = self.get_intermediate("managedclustersnapshot") - if snapshot: - return snapshot - - snapshot_id = self.get_cluster_snapshot_id() - if snapshot_id: - snapshot = _get_cluster_snapshot(self.cmd.cli_ctx, snapshot_id) - self.set_intermediate("managedclustersnapshot", - snapshot, overwrite_exists=True) - return snapshot - - def get_host_group_id(self) -> Union[str, None]: - return self._get_host_group_id() - - def _get_host_group_id(self) -> Union[str, None]: - raw_value = self.raw_param.get("host_group_id") - value_obtained_from_mc = None - if self.mc and self.mc.agent_pool_profiles: - agent_pool_profile = safe_list_get( - self.mc.agent_pool_profiles, 0, None - ) - if agent_pool_profile: - value_obtained_from_mc = agent_pool_profile.host_group_id - if value_obtained_from_mc is not None: - host_group_id = value_obtained_from_mc - else: - host_group_id = raw_value - return host_group_id - - def _get_kubernetes_version(self, read_only: bool = False) -> str: - """Internal function to dynamically obtain the value of kubernetes_version according to the context. - - If snapshot_id is specified, dynamic completion will be triggerd, and will try to get the corresponding value - from the Snapshot. When determining the value of the parameter, obtaining from `mc` takes precedence over user's - explicit input over snapshot over default vaule. 
- - :return: string - """ - # read the original value passed by the command - raw_value = self.raw_param.get("kubernetes_version") - # try to read the property value corresponding to the parameter from the `mc` object - value_obtained_from_mc = None - if self.mc: - value_obtained_from_mc = self.mc.kubernetes_version - # try to retrieve the value from snapshot - value_obtained_from_snapshot = None - value_obtained_from_cluster_snapshot = None - # skip dynamic completion if read_only is specified - if not read_only: - snapshot = self.get_snapshot() - if snapshot: - value_obtained_from_snapshot = snapshot.kubernetes_version - - if not read_only: - snapshot = self.get_cluster_snapshot() - if snapshot: - value_obtained_from_cluster_snapshot = snapshot.managed_cluster_properties_read_only.kubernetes_version - - # set default value - if value_obtained_from_mc is not None: - kubernetes_version = value_obtained_from_mc - # default value is an empty string - elif raw_value: - kubernetes_version = raw_value - elif not read_only and value_obtained_from_cluster_snapshot is not None: - kubernetes_version = value_obtained_from_cluster_snapshot - elif not read_only and value_obtained_from_snapshot is not None: - kubernetes_version = value_obtained_from_snapshot - else: - kubernetes_version = raw_value - - # this parameter does not need validation - return kubernetes_version - - def get_kubernetes_version(self) -> str: - """Obtain the value of kubernetes_version. - - Note: Inherited and extended in aks-preview to add support for getting values from snapshot. - - :return: string - """ - return self._get_kubernetes_version() - - def _get_os_sku(self, read_only: bool = False) -> Union[str, None]: - """Internal function to dynamically obtain the value of os_sku according to the context. - - If snapshot_id is specified, dynamic completion will be triggerd, and will try to get the corresponding value - from the Snapshot. 
When determining the value of the parameter, obtaining from `mc` takes precedence over user's - explicit input over snapshot over default vaule. - - :return: string or None - """ - # read the original value passed by the command - raw_value = self.raw_param.get("os_sku") - # try to read the property value corresponding to the parameter from the `mc` object - value_obtained_from_mc = None - if self.mc and self.mc.agent_pool_profiles: - agent_pool_profile = safe_list_get( - self.mc.agent_pool_profiles, 0, None - ) - if agent_pool_profile: - value_obtained_from_mc = agent_pool_profile.os_sku - # try to retrieve the value from snapshot - value_obtained_from_snapshot = None - # skip dynamic completion if read_only is specified - if not read_only: - snapshot = self.get_snapshot() - if snapshot: - value_obtained_from_snapshot = snapshot.os_sku - - # set default value - if value_obtained_from_mc is not None: - os_sku = value_obtained_from_mc - elif raw_value is not None: - os_sku = raw_value - elif not read_only and value_obtained_from_snapshot is not None: - os_sku = value_obtained_from_snapshot - else: - os_sku = raw_value - - # this parameter does not need validation - return os_sku - - def get_os_sku(self) -> Union[str, None]: - """Obtain the value of os_sku. - - Note: Inherited and extended in aks-preview to add support for getting values from snapshot. - - :return: string or None - """ - return self._get_os_sku() - - def _get_node_vm_size(self, read_only: bool = False) -> str: - """Internal function to dynamically obtain the value of node_vm_size according to the context. - - If snapshot_id is specified, dynamic completion will be triggerd, and will try to get the corresponding value - from the Snapshot. When determining the value of the parameter, obtaining from `mc` takes precedence over user's - explicit input over snapshot over default vaule. 
- - :return: string - """ - default_value = "Standard_DS2_v2" - # read the original value passed by the command - raw_value = self.raw_param.get("node_vm_size") - # try to read the property value corresponding to the parameter from the `mc` object - value_obtained_from_mc = None - if self.mc and self.mc.agent_pool_profiles: - agent_pool_profile = safe_list_get( - self.mc.agent_pool_profiles, 0, None - ) - if agent_pool_profile: - value_obtained_from_mc = agent_pool_profile.vm_size - # try to retrieve the value from snapshot - value_obtained_from_snapshot = None - # skip dynamic completion if read_only is specified - if not read_only: - snapshot = self.get_snapshot() - if snapshot: - value_obtained_from_snapshot = snapshot.vm_size - - # set default value - if value_obtained_from_mc is not None: - node_vm_size = value_obtained_from_mc - elif raw_value is not None: - node_vm_size = raw_value - elif value_obtained_from_snapshot is not None: - node_vm_size = value_obtained_from_snapshot - else: - node_vm_size = default_value - - # this parameter does not need validation - return node_vm_size - - def get_node_vm_size(self) -> str: - """Obtain the value of node_vm_size. - - Note: Inherited and extended in aks-preview to add support for getting values from snapshot. 
- - :return: string - """ - return self._get_node_vm_size() - - def get_disk_driver(self) -> Optional[ManagedClusterStorageProfileDiskCSIDriver]: - """Obtain the value of storage_profile.disk_csi_driver - - :return: Optional[ManagedClusterStorageProfileDiskCSIDriver] - """ - enable_disk_driver = self.raw_param.get("enable_disk_driver") - disable_disk_driver = self.raw_param.get("disable_disk_driver") - disk_driver_version = self.raw_param.get("disk_driver_version") - - if not enable_disk_driver and not disable_disk_driver and not disk_driver_version: - return None - profile = self.models.ManagedClusterStorageProfileDiskCSIDriver() - - if enable_disk_driver and disable_disk_driver: - raise MutuallyExclusiveArgumentError( - "Cannot specify --enable-disk-driver and " - "--disable-disk-driver at the same time." - ) - - if disable_disk_driver and disk_driver_version: - raise ArgumentUsageError( - "The parameter --disable-disk-driver cannot be used " - "when --disk-driver-version is specified.") - - if self.decorator_mode == DecoratorMode.UPDATE and disk_driver_version and not enable_disk_driver: - raise ArgumentUsageError( - "Parameter --enable-disk-driver is required " - "when --disk-driver-version is specified during update.") - - if self.decorator_mode == DecoratorMode.CREATE: - if disable_disk_driver: - profile.enabled = False - else: - profile.enabled = True - if not disk_driver_version: - disk_driver_version = CONST_DISK_DRIVER_V1 - profile.version = disk_driver_version - - if self.decorator_mode == DecoratorMode.UPDATE: - if enable_disk_driver: - profile.enabled = True - if disk_driver_version: - profile.version = disk_driver_version - elif disable_disk_driver: - msg = "Please make sure there are no existing PVs and PVCs that are used by AzureDisk CSI driver before disabling." 
- if not self.get_yes() and not prompt_y_n(msg, default="n"): - raise DecoratorEarlyExitException() - profile.enabled = False - - return profile - - def get_file_driver(self) -> Optional[ManagedClusterStorageProfileFileCSIDriver]: - """Obtain the value of storage_profile.file_csi_driver - - :return: Optional[ManagedClusterStorageProfileFileCSIDriver] - """ - enable_file_driver = self.raw_param.get("enable_file_driver") - disable_file_driver = self.raw_param.get("disable_file_driver") - - if not enable_file_driver and not disable_file_driver: - return None - profile = self.models.ManagedClusterStorageProfileFileCSIDriver() - - if enable_file_driver and disable_file_driver: - raise MutuallyExclusiveArgumentError( - "Cannot specify --enable-file-driver and " - "--disable-file-driver at the same time." - ) - - if self.decorator_mode == DecoratorMode.CREATE: - if disable_file_driver: - profile.enabled = False - else: - profile.enabled = True - - if self.decorator_mode == DecoratorMode.UPDATE: - if enable_file_driver: - profile.enabled = True - elif disable_file_driver: - msg = "Please make sure there are no existing PVs and PVCs that are used by AzureFile CSI driver before disabling." 
- if not self.get_yes() and not prompt_y_n(msg, default="n"): - raise DecoratorEarlyExitException() - profile.enabled = False - - return profile - - def get_snapshot_controller(self) -> Optional[ManagedClusterStorageProfileSnapshotController]: - """Obtain the value of storage_profile.snapshot_controller - - :return: Optional[ManagedClusterStorageProfileSnapshotController] - """ - enable_snapshot_controller = self.raw_param.get("enable_snapshot_controller") - disable_snapshot_controller = self.raw_param.get("disable_snapshot_controller") - - if not enable_snapshot_controller and not disable_snapshot_controller: - return None - - profile = self.models.ManagedClusterStorageProfileSnapshotController() - - if enable_snapshot_controller and disable_snapshot_controller: - raise MutuallyExclusiveArgumentError( - "Cannot specify --enable-snapshot_controller and " - "--disable-snapshot_controller at the same time." - ) - - if self.decorator_mode == DecoratorMode.CREATE: - if disable_snapshot_controller: - profile.enabled = False - else: - profile.enabled = True - - if self.decorator_mode == DecoratorMode.UPDATE: - if enable_snapshot_controller: - profile.enabled = True - elif disable_snapshot_controller: - msg = "Please make sure there are no existing VolumeSnapshots, VolumeSnapshotClasses and VolumeSnapshotContents " \ - "that are used by the snapshot controller before disabling." - if not self.get_yes() and not prompt_y_n(msg, default="n"): - raise DecoratorEarlyExitException() - profile.enabled = False - - return profile - - def get_storage_profile(self) -> Optional[ManagedClusterStorageProfile]: - """Obtain the value of storage_profile. 
- - :return: Optional[ManagedClusterStorageProfile] - """ - profile = self.models.ManagedClusterStorageProfile() - if self.mc.storage_profile is not None: - profile = self.mc.storage_profile - profile.disk_csi_driver = self.get_disk_driver() - profile.file_csi_driver = self.get_file_driver() - profile.snapshot_controller = self.get_snapshot_controller() - - return profile - - def get_oidc_issuer_profile(self) -> ManagedClusterOIDCIssuerProfile: - """Obtain the value of oidc_issuer_profile based on the user input. - - :return: ManagedClusterOIDCIssuerProfile - """ - enable_flag_value = bool(self.raw_param.get("enable_oidc_issuer")) - if not enable_flag_value: - # enable flag not set, return a None profile, server side will backfill the default/existing value - return None - - profile = self.models.ManagedClusterOIDCIssuerProfile() - if self.decorator_mode == DecoratorMode.UPDATE: - if self.mc.oidc_issuer_profile is not None: - profile = self.mc.oidc_issuer_profile - profile.enabled = True - - return profile - - def get_workload_identity_profile(self) -> Optional[ManagedClusterSecurityProfileWorkloadIdentity]: - """Obtrain the value of security_profile.workload_identity. 
- - :return: Optional[ManagedClusterSecurityProfileWorkloadIdentity] - """ - # NOTE: enable_workload_identity can be one of: - # - # - True: sets by user, to enable the workload identity feature - # - False: sets by user, to disable the workload identity feature - # - None: user unspecified, don't set the profile and let server side to backfill - enable_workload_identity = self.raw_param.get("enable_workload_identity") - - if enable_workload_identity is None: - return None - - profile = self.models.ManagedClusterSecurityProfileWorkloadIdentity() - if self.decorator_mode == DecoratorMode.UPDATE: - if self.mc.security_profile is not None and self.mc.security_profile.workload_identity is not None: - # reuse previous profile is has been set - profile = self.mc.security_profile.workload_identity - - profile.enabled = bool(enable_workload_identity) - - if profile.enabled: - # in enable case, we need to check if OIDC issuer has been enabled - oidc_issuer_profile = self.get_oidc_issuer_profile() - if self.decorator_mode == DecoratorMode.UPDATE and oidc_issuer_profile is None: - # if the cluster has enabled OIDC issuer before, in update call: - # - # az aks update --enable-workload-identity - # - # we need to use previous OIDC issuer profile - oidc_issuer_profile = self.mc.oidc_issuer_profile - oidc_issuer_enabled = oidc_issuer_profile is not None and oidc_issuer_profile.enabled - if not oidc_issuer_enabled: - raise RequiredArgumentMissingError( - "Enabling workload identity requires enabling OIDC issuer (--enable-oidc-issuer)." - ) - - return profile - - def get_crg_id(self) -> str: - """Obtain the values of crg_id. - - :return: string or None - """ - # read the original value passed by the command - crg_id = self.raw_param.get("crg_id") - return crg_id - - def _get_enable_azure_keyvault_kms(self, enable_validation: bool = False) -> bool: - """Internal function to obtain the value of enable_azure_keyvault_kms. - - This function supports the option of enable_validation. 
When enabled, if azure_keyvault_kms_key_id is empty, - raise a RequiredArgumentMissingError. - - :return: bool - """ - # read the original value passed by the command - # TODO: set default value as False after the get function of AKSParamDict accepts parameter `default` - enable_azure_keyvault_kms = self.raw_param.get( - "enable_azure_keyvault_kms") - # In create mode, try to read the property value corresponding to the parameter from the `mc` object. - if self.decorator_mode == DecoratorMode.CREATE: - if ( - self.mc and - self.mc.security_profile and - self.mc.security_profile.azure_key_vault_kms - ): - enable_azure_keyvault_kms = self.mc.security_profile.azure_key_vault_kms.enabled - - # this parameter does not need dynamic completion - # validation - if enable_validation: - if bool(enable_azure_keyvault_kms) != bool(self._get_azure_keyvault_kms_key_id(enable_validation=False)): - raise RequiredArgumentMissingError( - 'You must set "--enable-azure-keyvault-kms" and "--azure-keyvault-kms-key-id" at the same time.' - ) - - return enable_azure_keyvault_kms - - def get_enable_azure_keyvault_kms(self) -> bool: - """Obtain the value of enable_azure_keyvault_kms. - - This function will verify the parameter by default. When enabled, if azure_keyvault_kms_key_id is empty, - raise a RequiredArgumentMissingError. - - :return: bool - """ - return self._get_enable_azure_keyvault_kms(enable_validation=True) - - def _get_azure_keyvault_kms_key_id(self, enable_validation: bool = False) -> Union[str, None]: - """Internal function to obtain the value of azure_keyvault_kms_key_id according to the context. - - This function supports the option of enable_validation. When enabled, it will check if azure_keyvault_kms_key_id is - assigned but enable_azure_keyvault_kms is not specified, if so, raise a RequiredArgumentMissingError. 
- - :return: string or None - """ - # read the original value passed by the command - azure_keyvault_kms_key_id = self.raw_param.get( - "azure_keyvault_kms_key_id") - # In create mode, try to read the property value corresponding to the parameter from the `mc` object. - if self.decorator_mode == DecoratorMode.CREATE: - if ( - self.mc and - self.mc.security_profile and - self.mc.security_profile.azure_key_vault_kms and - self.mc.security_profile.azure_key_vault_kms.key_id is not None - ): - azure_keyvault_kms_key_id = self.mc.security_profile.azure_key_vault_kms.key_id - - if enable_validation: - enable_azure_keyvault_kms = self._get_enable_azure_keyvault_kms( - enable_validation=False) - if ( - azure_keyvault_kms_key_id and - ( - enable_azure_keyvault_kms is None or - enable_azure_keyvault_kms is False - ) - ): - raise RequiredArgumentMissingError( - '"--azure-keyvault-kms-key-id" requires "--enable-azure-keyvault-kms".') - - return azure_keyvault_kms_key_id - - def get_azure_keyvault_kms_key_id(self) -> Union[str, None]: - """Obtain the value of azure_keyvault_kms_key_id. - - This function will verify the parameter by default. When enabled, if enable_azure_keyvault_kms is False, - raise a RequiredArgumentMissingError. - - :return: bool - """ - return self._get_azure_keyvault_kms_key_id(enable_validation=True) - - def _get_azure_keyvault_kms_key_vault_network_access(self, enable_validation: bool = False) -> Union[str, None]: - """Internal function to obtain the value of azure_keyvault_kms_key_vault_network_access according to the context. - - This function supports the option of enable_validation. When enabled, it will check if azure_keyvault_kms_key_vault_network_access is - assigned but enable_azure_keyvault_kms is not specified, if so, raise a RequiredArgumentMissingError. 
- - :return: string or None - """ - # read the original value passed by the command - azure_keyvault_kms_key_vault_network_access = self.raw_param.get( - "azure_keyvault_kms_key_vault_network_access") - # Do not read the property value corresponding to the parameter from the `mc` object in create mode, - # because keyVaultNetworkAccess has the default value "Public" in azure-rest-api-specs. - if enable_validation: - enable_azure_keyvault_kms = self._get_enable_azure_keyvault_kms( - enable_validation=False) - if ( - azure_keyvault_kms_key_vault_network_access and - ( - enable_azure_keyvault_kms is None or - enable_azure_keyvault_kms is False - ) - ): - raise RequiredArgumentMissingError( - '"--azure-keyvault-kms-key-vault-network-access" requires "--enable-azure-keyvault-kms".') - - return azure_keyvault_kms_key_vault_network_access - - def get_azure_keyvault_kms_key_vault_network_access(self) -> Union[str, None]: - """Obtain the value of azure_keyvault_kms_key_vault_network_access. - - This function will verify the parameter by default. When enabled, if enable_azure_keyvault_kms is False, - raise a RequiredArgumentMissingError. - - :return: bool - """ - return self._get_azure_keyvault_kms_key_vault_network_access(enable_validation=True) - - def _get_azure_keyvault_kms_key_vault_resource_id(self, enable_validation: bool = False) -> Union[str, None]: - """Internal function to obtain the value of azure_keyvault_kms_key_vault_resource_id according to the context. - - This function supports the option of enable_validation. When enabled, it will do validation, and raise a RequiredArgumentMissingError. - - :return: string or None - """ - # read the original value passed by the command - azure_keyvault_kms_key_vault_resource_id = self.raw_param.get( - "azure_keyvault_kms_key_vault_resource_id") - # In create mode, try to read the property value corresponding to the parameter from the `mc` object. 
- if self.decorator_mode == DecoratorMode.CREATE: - if ( - self.mc and - self.mc.security_profile and - self.mc.security_profile.azure_key_vault_kms and - self.mc.security_profile.azure_key_vault_kms.key_vault_resource_id is not None - ): - azure_keyvault_kms_key_vault_resource_id = self.mc.security_profile.azure_key_vault_kms.key_vault_resource_id - - if enable_validation: - enable_azure_keyvault_kms = self._get_enable_azure_keyvault_kms( - enable_validation=False) - if ( - azure_keyvault_kms_key_vault_resource_id and - ( - enable_azure_keyvault_kms is None or - enable_azure_keyvault_kms is False - ) - ): - raise RequiredArgumentMissingError( - '"--azure-keyvault-kms-key-vault-resource-id" requires "--enable-azure-keyvault-kms".') - - key_vault_network_access = self._get_azure_keyvault_kms_key_vault_network_access( - enable_validation=False) - if ( - key_vault_network_access == CONST_AZURE_KEYVAULT_NETWORK_ACCESS_PRIVATE and - ( - azure_keyvault_kms_key_vault_resource_id is None or - azure_keyvault_kms_key_vault_resource_id == "" - ) - ): - raise ArgumentUsageError( - '"--azure-keyvault-kms-key-vault-resource-id" can not be empty if "--azure-keyvault-kms-key-vault-network-access" is "Private".') - if ( - key_vault_network_access == CONST_AZURE_KEYVAULT_NETWORK_ACCESS_PUBLIC and - ( - azure_keyvault_kms_key_vault_resource_id is not None and - azure_keyvault_kms_key_vault_resource_id != "" - ) - ): - raise ArgumentUsageError( - '"--azure-keyvault-kms-key-vault-resource-id" must be empty if "--azure-keyvault-kms-key-vault-network-access" is "Public".') - - return azure_keyvault_kms_key_vault_resource_id - - def get_azure_keyvault_kms_key_vault_resource_id(self) -> Union[str, None]: - """Obtain the value of azure_keyvault_kms_key_vault_resource_id. - - This function will verify the parameter by default. When enabled, if enable_azure_keyvault_kms is False, - raise a RequiredArgumentMissingError. 
- - :return: bool - """ - return self._get_azure_keyvault_kms_key_vault_resource_id(enable_validation=True) - - def get_updated_assign_kubelet_identity(self) -> str: - """Obtain the value of assign_kubelet_identity based on the user input. - - :return: str - """ - kubelet_identity_resource_id = self.raw_param.get("assign_kubelet_identity") - if not kubelet_identity_resource_id: - return "" - - msg = "You're going to update kubelet identity to {}, which will upgrade every node pool in the cluster " \ - "and might take a while, do you wish to continue?".format(kubelet_identity_resource_id) - if not self.get_yes() and not prompt_y_n(msg, default="n"): - raise DecoratorEarlyExitException - - return kubelet_identity_resource_id - - def get_cluster_uaidentity_object_id(self) -> str: - assigned_identity = self.get_assign_identity() - cluster_identity_resource_id = "" - if assigned_identity is None or assigned_identity == "": - # Suppose identity is present on mc - if not (self.mc and self.mc.identity and self.mc.identity.user_assigned_identities): - raise RequiredArgumentMissingError( - "--assign-identity is not provided and the cluster identity type is not user assigned, cannot update kubelet identity") - cluster_identity_resource_id = list(self.mc.identity.user_assigned_identities.keys())[0] - else: - cluster_identity_resource_id = assigned_identity - return self.get_identity_by_msi_client(cluster_identity_resource_id).principal_id - - def _get_enable_apiserver_vnet_integration(self, enable_validation: bool = False) -> bool: - """Internal function to obtain the value of enable_apiserver_vnet_integration. - - This function supports the option of enable_validation. 
When enable_apiserver_vnet_integration is specified, - For CREATE: if enable-private-cluster is not used, raise an RequiredArgumentMissingError; - For UPDATE: if apiserver-subnet-id is not used, raise an RequiredArgumentMissingError; - - :return: bool - """ - # read the original value passed by the command - enable_apiserver_vnet_integration = self.raw_param.get("enable_apiserver_vnet_integration") - # In create mode, try to read the property value corresponding to the parameter from the `mc` object. - if self.decorator_mode == DecoratorMode.CREATE: - if ( - self.mc and - self.mc.api_server_access_profile and - self.mc.api_server_access_profile.enable_vnet_integration is not None - ): - enable_apiserver_vnet_integration = self.mc.api_server_access_profile.enable_vnet_integration - - # this parameter does not need dynamic completion - # validation - if enable_validation: - if self.decorator_mode == DecoratorMode.CREATE: - if enable_apiserver_vnet_integration: - # remove this validation after we support public cluster - if not self._get_enable_private_cluster(enable_validation=False): - raise RequiredArgumentMissingError( - "--apiserver-vnet-integration is only supported for private cluster right now. " - "Please use it together with --enable-private-cluster" - ) - if self.decorator_mode == DecoratorMode.UPDATE: - if enable_apiserver_vnet_integration: - if self._get_apiserver_subnet_id(enable_validation=False) is None: - raise RequiredArgumentMissingError( - "--apiserver-subnet-id is required for update with --apiserver-vnet-integration." - ) - - return enable_apiserver_vnet_integration - - def get_enable_apiserver_vnet_integration(self) -> bool: - """Obtain the value of enable_apiserver_vnet_integration. - - This function will verify the parameter by default. 
When enable_apiserver_vnet_integration is specified, - For CREATE: if enable-private-cluster is not used, raise an RequiredArgumentMissingError; - For UPDATE: if apiserver-subnet-id is not used, raise an RequiredArgumentMissingError - - :return: bool - """ - return self._get_enable_apiserver_vnet_integration(enable_validation=True) - - def _get_apiserver_subnet_id(self, enable_validation: bool = False) -> Union[str, None]: - """Internal function to obtain the value of apiserver_subnet_id. - - This function supports the option of enable_validation. When apiserver_subnet_id is specified, - if enable_apiserver_vnet_integration is not used, raise an RequiredArgumentMissingError; - For CREATE: if vnet_subnet_id is not used, raise an RequiredArgumentMissingError; - - :return: bool - """ - # read the original value passed by the command - apiserver_subnet_id = self.raw_param.get("apiserver_subnet_id") - # try to read the property value corresponding to the parameter from the `mc` object - if self.decorator_mode == DecoratorMode.CREATE: - if ( - self.mc and - self.mc.api_server_access_profile and - self.mc.api_server_access_profile.subnet_id is not None - ): - apiserver_subnet_id = self.mc.api_server_access_profile.subnet_id - - # this parameter does not need dynamic completion - # validation - if enable_validation: - if self.decorator_mode == DecoratorMode.CREATE: - vnet_subnet_id = self.get_vnet_subnet_id() - if apiserver_subnet_id and vnet_subnet_id is None: - raise RequiredArgumentMissingError( - '"--apiserver-subnet-id" requires "--vnet-subnet-id".') - - enable_apiserver_vnet_integration = self._get_enable_apiserver_vnet_integration( - enable_validation=False) - if ( - apiserver_subnet_id and - ( - enable_apiserver_vnet_integration is None or - enable_apiserver_vnet_integration is False - ) - ): - raise RequiredArgumentMissingError( - '"--apiserver-subnet-id" requires "--enable-apiserver-vnet-integration".') - - return apiserver_subnet_id - - def 
get_apiserver_subnet_id(self) -> Union[str, None]: - """Obtain the value of apiserver_subnet_id. - - This function will verify the parameter by default. When apiserver_subnet_id is specified, - if enable_apiserver_vnet_integration is not specified, raise an RequiredArgumentMissingError; - - :return: bool - """ - return self._get_apiserver_subnet_id(enable_validation=True) - - def _get_enable_keda(self, enable_validation: bool = False) -> bool: - """Internal function to obtain the value of enable_keda. - - This function supports the option of enable_validation. When enabled, if both enable_keda and disable_keda are - specified, raise a MutuallyExclusiveArgumentError. - - :return: bool - """ - # Read the original value passed by the command. - enable_keda = self.raw_param.get("enable_keda") - - # In create mode, try to read the property value corresponding to the parameter from the `mc` object. - if self.decorator_mode == DecoratorMode.CREATE: - if ( - self.mc and - self.mc.workload_auto_scaler_profile and - self.mc.workload_auto_scaler_profile.keda - ): - enable_keda = self.mc.workload_auto_scaler_profile.keda.enabled - - # This parameter does not need dynamic completion. - if enable_validation: - if enable_keda and self._get_disable_keda(enable_validation=False): - raise MutuallyExclusiveArgumentError( - "Cannot specify --enable-keda and --disable-keda at the same time." - ) - - return enable_keda - - def get_enable_keda(self) -> bool: - """Obtain the value of enable_keda. - - This function will verify the parameter by default. If both enable_keda and disable_keda are specified, raise a - MutuallyExclusiveArgumentError. - - :return: bool - """ - return self._get_enable_keda(enable_validation=True) - - def _get_disable_keda(self, enable_validation: bool = False) -> bool: - """Internal function to obtain the value of disable_keda. - - This function supports the option of enable_validation. 
When enabled, if both enable_keda and disable_keda are - specified, raise a MutuallyExclusiveArgumentError. - - :return: bool - """ - # Read the original value passed by the command. - disable_keda = self.raw_param.get("disable_keda") - - # This option is not supported in create mode, hence we do not read the property value from the `mc` object. - # This parameter does not need dynamic completion. - if enable_validation: - if disable_keda and self._get_enable_keda(enable_validation=False): - raise MutuallyExclusiveArgumentError( - "Cannot specify --enable-keda and --disable-keda at the same time." - ) - - return disable_keda - - def get_disable_keda(self) -> bool: - """Obtain the value of disable_keda. - - This function will verify the parameter by default. If both enable_keda and disable_keda are specified, raise a - MutuallyExclusiveArgumentError. - - :return: bool - """ - return self._get_disable_keda(enable_validation=True) - - -class AKSPreviewCreateDecorator(AKSCreateDecorator): - # pylint: disable=super-init-not-called - def __init__( - self, - cmd: AzCliCommand, - client: ContainerServiceClient, - raw_parameters: Dict, - resource_type: ResourceType, - ): - """Internal controller of aks_create in aks-preview. - - Break down the all-in-one aks_create function into several relatively independent functions (some of them have - a certain order dependency) that only focus on a specific profile or process a specific piece of logic. - In addition, an overall control function is provided. By calling the aforementioned independent functions one - by one, a complete ManagedCluster object is gradually decorated and finally requests are sent to create a - cluster. 
- """ - self.cmd = cmd - self.client = client - self.models = AKSPreviewModels(cmd, resource_type) - # store the context in the process of assemble the ManagedCluster object - self.context = AKSPreviewContext( - cmd, - raw_parameters, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - - def set_up_agent_pool_profiles(self, mc: ManagedCluster) -> ManagedCluster: - """Set up agent pool profiles for the ManagedCluster object. - - Note: Inherited and extended in aks-preview to set some additional properties. - - :return: the ManagedCluster object - """ - mc = super().set_up_agent_pool_profiles(mc) - agent_pool_profile = safe_list_get(mc.agent_pool_profiles, 0, None) - - # set up extra parameters supported in aks-preview - agent_pool_profile.pod_subnet_id = self.context.get_pod_subnet_id() - agent_pool_profile.enable_fips = self.context.get_enable_fips_image() - agent_pool_profile.workload_runtime = ( - self.context.get_workload_runtime() - ) - agent_pool_profile.gpu_instance_profile = ( - self.context.get_gpu_instance_profile() - ) - agent_pool_profile.message_of_the_day = ( - self.context.get_message_of_the_day() - ) - agent_pool_profile.enable_custom_ca_trust = ( - self.context.get_enable_custom_ca_trust() - ) - agent_pool_profile.kubelet_config = self.context.get_kubelet_config() - agent_pool_profile.linux_os_config = self.context.get_linux_os_config() - - # snapshot creation data - creation_data = None - snapshot_id = self.context.get_snapshot_id() - if snapshot_id: - creation_data = self.models.CreationData( - source_resource_id=snapshot_id - ) - agent_pool_profile.creation_data = creation_data - agent_pool_profile.host_group_id = self.context.get_host_group_id() - agent_pool_profile.capacity_reservation_group_id = self.context.get_crg_id() - - mc.agent_pool_profiles = [agent_pool_profile] - return mc - - def set_up_creationdata_of_cluster_snapshot(self, mc: ManagedCluster) -> ManagedCluster: - """Set up creationData of cluster snapshot for the 
ManagedCluster object. - - Note: Inherited and extended in aks-preview to set some additional properties. - - :return: the ManagedCluster object - """ - # snapshot creation data - creation_data = None - snapshot_id = self.context.get_cluster_snapshot_id() - if snapshot_id: - creation_data = self.models.CreationData( - source_resource_id=snapshot_id - ) - mc.creation_data = creation_data - return mc - - def set_up_http_proxy_config(self, mc: ManagedCluster) -> ManagedCluster: - """Set up http proxy config for the ManagedCluster object. - - :return: the ManagedCluster object - """ - if not isinstance(mc, self.models.ManagedCluster): - raise CLIInternalError( - "Unexpected mc object with type '{}'.".format(type(mc)) - ) - - mc.http_proxy_config = self.context.get_http_proxy_config() - return mc - - def set_up_node_resource_group(self, mc: ManagedCluster) -> ManagedCluster: - """Set up node resource group for the ManagedCluster object. - - :return: the ManagedCluster object - """ - if not isinstance(mc, self.models.ManagedCluster): - raise CLIInternalError( - "Unexpected mc object with type '{}'.".format(type(mc)) - ) - - mc.node_resource_group = self.context.get_node_resource_group() - return mc - - def set_up_network_profile(self, mc: ManagedCluster) -> ManagedCluster: - """Set up network profile for the ManagedCluster object. - - Note: Inherited and extended in aks-preview to set the nat_gateway_profile. - - :return: the ManagedCluster object - """ - mc = super().set_up_network_profile(mc) - network_profile = mc.network_profile - - ( - pod_cidr, - service_cidr, - dns_service_ip, - _, - _, - ) = self.context.get_pod_cidr_and_service_cidr_and_dns_service_ip_and_docker_bridge_address_and_network_policy() - - ( - pod_cidrs, - service_cidrs, - ip_families - ) = self.context.get_pod_cidrs_and_service_cidrs_and_ip_families() - - # set dns_service_ip, pod_cidr(s), service(s) with user provided values if - # of them are set. 
Largely follows the base function which will potentially - # overwrite default SDK values. - if any([ - dns_service_ip, - pod_cidr, - pod_cidrs, - service_cidr, - service_cidrs, - ]): - network_profile.dns_service_ip = dns_service_ip - network_profile.pod_cidr = pod_cidr - network_profile.pod_cidrs = pod_cidrs - network_profile.service_cidr = service_cidr - network_profile.service_cidrs = service_cidrs - - if ip_families: - network_profile.ip_families = ip_families - - if self.context.get_load_balancer_managed_outbound_ipv6_count() is not None: - network_profile.load_balancer_profile = create_load_balancer_profile( - self.context.get_load_balancer_managed_outbound_ip_count(), - self.context.get_load_balancer_managed_outbound_ipv6_count(), - self.context.get_load_balancer_outbound_ips(), - self.context.get_load_balancer_outbound_ip_prefixes(), - self.context.get_load_balancer_outbound_ports(), - self.context.get_load_balancer_idle_timeout(), - models=self.models.lb_models, - ) - - # build nat gateway profile, which is part of the network profile - nat_gateway_profile = create_nat_gateway_profile( - self.context.get_nat_gateway_managed_outbound_ip_count(), - self.context.get_nat_gateway_idle_timeout(), - models=self.models.nat_gateway_models, - ) - - load_balancer_sku = self.context.get_load_balancer_sku() - if load_balancer_sku != "basic": - network_profile.nat_gateway_profile = nat_gateway_profile - mc.network_profile = network_profile - return mc - - def set_up_pod_security_policy(self, mc: ManagedCluster) -> ManagedCluster: - """Set up pod security policy for the ManagedCluster object. 
- - :return: the ManagedCluster object - """ - if not isinstance(mc, self.models.ManagedCluster): - raise CLIInternalError( - "Unexpected mc object with type '{}'.".format(type(mc)) - ) - - mc.enable_pod_security_policy = self.context.get_enable_pod_security_policy() - return mc - - def set_up_pod_identity_profile(self, mc: ManagedCluster) -> ManagedCluster: - """Set up pod identity profile for the ManagedCluster object. - - This profile depends on network profile. - - :return: the ManagedCluster object - """ - if not isinstance(mc, self.models.ManagedCluster): - raise CLIInternalError( - "Unexpected mc object with type '{}'.".format(type(mc)) - ) - - pod_identity_profile = None - enable_pod_identity = self.context.get_enable_pod_identity() - enable_pod_identity_with_kubenet = self.context.get_enable_pod_identity_with_kubenet() - if enable_pod_identity: - pod_identity_profile = self.models.pod_identity_models.ManagedClusterPodIdentityProfile( - enabled=True, - allow_network_plugin_kubenet=enable_pod_identity_with_kubenet, - ) - mc.pod_identity_profile = pod_identity_profile - return mc - - def build_monitoring_addon_profile(self) -> ManagedClusterAddonProfile: - """Build monitoring addon profile. - - Note: Overwritten in aks-preview. 
- - :return: a ManagedClusterAddonProfile object - """ - # determine the value of constants - addon_consts = self.context.get_addon_consts() - CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID = addon_consts.get( - "CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID" - ) - CONST_MONITORING_USING_AAD_MSI_AUTH = addon_consts.get( - "CONST_MONITORING_USING_AAD_MSI_AUTH" - ) - - monitoring_addon_profile = self.models.ManagedClusterAddonProfile( - enabled=True, - config={ - CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: self.context.get_workspace_resource_id(), - CONST_MONITORING_USING_AAD_MSI_AUTH: self.context.get_enable_msi_auth_for_monitoring(), - }, - ) - # post-process, create a deployment - ensure_container_insights_for_monitoring( - self.cmd, - monitoring_addon_profile, - self.context.get_subscription_id(), - self.context.get_resource_group_name(), - self.context.get_name(), - self.context.get_location(), - remove_monitoring=False, - aad_route=self.context.get_enable_msi_auth_for_monitoring(), - create_dcr=True, - create_dcra=False, - ) - # set intermediate - self.context.set_intermediate( - "monitoring", True, overwrite_exists=True) - return monitoring_addon_profile - - def build_ingress_appgw_addon_profile(self) -> ManagedClusterAddonProfile: - """Build ingress appgw addon profile. - - Note: Inherited and extended in aks-preview to support option appgw_subnet_prefix. 
- - :return: a ManagedClusterAddonProfile object - """ - # determine the value of constants - addon_consts = self.context.get_addon_consts() - CONST_INGRESS_APPGW_SUBNET_CIDR = addon_consts.get( - "CONST_INGRESS_APPGW_SUBNET_CIDR" - ) - - ingress_appgw_addon_profile = super().build_ingress_appgw_addon_profile() - appgw_subnet_prefix = self.context.get_appgw_subnet_prefix() - if ( - appgw_subnet_prefix is not None and - ingress_appgw_addon_profile.config.get( - CONST_INGRESS_APPGW_SUBNET_CIDR - ) - is None - ): - ingress_appgw_addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_prefix - return ingress_appgw_addon_profile - - def build_gitops_addon_profile(self) -> ManagedClusterAddonProfile: - """Build gitops addon profile. - - :return: a ManagedClusterAddonProfile object - """ - gitops_addon_profile = self.models.ManagedClusterAddonProfile( - enabled=True, - ) - return gitops_addon_profile - - def build_web_app_routing_profile(self) -> ManagedClusterIngressProfileWebAppRouting: - """Build the ingress_profile.web_app_routing profile - - :return: a ManagedClusterIngressProfileWebAppRouting object - """ - profile = self.models.ManagedClusterIngressProfileWebAppRouting( - enabled=True, - ) - dns_zone_resource_id = self.context.raw_param.get("dns_zone_resource_id") - if dns_zone_resource_id is not None: - profile.dns_zone_resource_id = dns_zone_resource_id - return profile - - def set_up_addon_profiles(self, mc: ManagedCluster) -> ManagedCluster: - """Set up addon profiles for the ManagedCluster object. - - Note: Inherited and extended in aks-preview to set some extra addons. 
- - :return: the ManagedCluster object - """ - addon_consts = self.context.get_addon_consts() - CONST_GITOPS_ADDON_NAME = addon_consts.get("CONST_GITOPS_ADDON_NAME") - - mc = super().set_up_addon_profiles(mc) - addon_profiles = mc.addon_profiles - addons = self.context.get_enable_addons() - if "gitops" in addons: - addon_profiles[ - CONST_GITOPS_ADDON_NAME - ] = self.build_gitops_addon_profile() - mc.addon_profiles = addon_profiles - - if "web_application_routing" in addons: - if mc.ingress_profile is None: - mc.ingress_profile = self.models.ManagedClusterIngressProfile() - mc.ingress_profile.web_app_routing = self.build_web_app_routing_profile() - - return mc - - def set_up_windows_profile(self, mc: ManagedCluster) -> ManagedCluster: - """Set up windows profile for the ManagedCluster object. - - Note: Inherited and extended in aks-preview to set gmsa related options. - - :return: the ManagedCluster object - """ - mc = super().set_up_windows_profile(mc) - windows_profile = mc.windows_profile - - if windows_profile and self.context.get_enable_windows_gmsa(): - gmsa_dns_server, gmsa_root_domain_name = self.context.get_gmsa_dns_server_and_root_domain_name() - windows_profile.gmsa_profile = self.models.WindowsGmsaProfile( - enabled=True, - dns_server=gmsa_dns_server, - root_domain_name=gmsa_root_domain_name, - ) - mc.windows_profile = windows_profile - return mc - - def set_up_storage_profile(self, mc: ManagedCluster) -> ManagedCluster: - """Set up storage profile for the ManagedCluster object. - :return: the ManagedCluster object - """ - mc.storage_profile = self.context.get_storage_profile() - - return mc - - def set_up_oidc_issuer_profile(self, mc: ManagedCluster) -> ManagedCluster: - """Set up OIDC issuer profile for the ManagedCluster object. 
- - :return: the ManagedCluster object - """ - mc.oidc_issuer_profile = self.context.get_oidc_issuer_profile() - - return mc - - def set_up_workload_identity_profile(self, mc: ManagedCluster) -> ManagedCluster: - """Set up workload identity for the ManagedCluster object. - - :return: the ManagedCluster object - """ - profile = self.context.get_workload_identity_profile() - if profile is None: - if mc.security_profile is not None: - # set the value to None to let server side to fill in the default value - mc.security_profile.workload_identity = None - return mc - - if mc.security_profile is None: - mc.security_profile = self.models.ManagedClusterSecurityProfile() - mc.security_profile.workload_identity = profile - - return mc - - def set_up_azure_keyvault_kms(self, mc: ManagedCluster) -> ManagedCluster: - """Set up security profile azureKeyVaultKms for the ManagedCluster object. - - :return: the ManagedCluster object - """ - if self.context.get_enable_azure_keyvault_kms(): - key_id = self.context.get_azure_keyvault_kms_key_id() - if key_id: - if mc.security_profile is None: - mc.security_profile = self.models.ManagedClusterSecurityProfile() - mc.security_profile.azure_key_vault_kms = self.models.AzureKeyVaultKms( - enabled=True, - key_id=key_id, - ) - key_vault_network_access = self.context.get_azure_keyvault_kms_key_vault_network_access() - mc.security_profile.azure_key_vault_kms.key_vault_network_access = key_vault_network_access - if key_vault_network_access == CONST_AZURE_KEYVAULT_NETWORK_ACCESS_PRIVATE: - mc.security_profile.azure_key_vault_kms.key_vault_resource_id = self.context.get_azure_keyvault_kms_key_vault_resource_id() - - return mc - - def set_up_api_server_access_profile(self, mc: ManagedCluster) -> ManagedCluster: - """Set up apiserverAccessProfile enableVnetIntegration and subnetId for the ManagedCluster object. 
- - :return: the ManagedCluster object - """ - mc = super().set_up_api_server_access_profile(mc) - if self.context.get_enable_apiserver_vnet_integration(): - mc.api_server_access_profile.enable_vnet_integration = True - if self.context.get_apiserver_subnet_id(): - mc.api_server_access_profile.subnet_id = self.context.get_apiserver_subnet_id() - - return mc - - def set_up_workload_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster: - """Set up workload auto-scaler profile for the ManagedCluster object. - - :return: the ManagedCluster object - """ - if not isinstance(mc, self.models.ManagedCluster): - raise CLIInternalError(f"Unexpected mc object with type '{type(mc)}'.") - - if self.context.get_enable_keda(): - if mc.workload_auto_scaler_profile is None: - mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile() - mc.workload_auto_scaler_profile.keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(enabled=True) - - return mc - - def construct_mc_preview_profile(self) -> ManagedCluster: - """The overall controller used to construct the preview ManagedCluster profile. - - The completely constructed ManagedCluster object will later be passed as a parameter to the underlying SDK - (mgmt-containerservice) to send the actual request. - - :return: the ManagedCluster object - """ - # construct the default ManagedCluster profile - mc = self.construct_default_mc_profile() - # set up http proxy config - mc = self.set_up_http_proxy_config(mc) - # set up node resource group - mc = self.set_up_node_resource_group(mc) - # set up pod security policy - mc = self.set_up_pod_security_policy(mc) - # set up pod identity profile - mc = self.set_up_pod_identity_profile(mc) - - # update workload identity & OIDC issuer settings - # NOTE: in current implementation, workload identity settings setup requires checking - # previous OIDC issuer profile. 
However, the OIDC issuer settings setup will - # overrides the previous OIDC issuer profile based on user input. Therefore, we have - # to make sure the workload identity settings setup is done after OIDC issuer settings. - mc = self.set_up_workload_identity_profile(mc) - mc = self.set_up_oidc_issuer_profile(mc) - - mc = self.set_up_azure_keyvault_kms(mc) - mc = self.set_up_creationdata_of_cluster_snapshot(mc) - - mc = self.set_up_storage_profile(mc) - - mc = self.set_up_workload_auto_scaler_profile(mc) - - return mc - - def create_mc_preview(self, mc: ManagedCluster) -> ManagedCluster: - """Send request to create a real managed cluster. - - Note: Inherited and extended in aks-preview to create dcr association for monitoring addon if - enable_msi_auth_for_monitoring is specified after cluster is created. - - :return: the ManagedCluster object - """ - created_cluster = super().create_mc(mc) - - # determine the value of constants - addon_consts = self.context.get_addon_consts() - CONST_MONITORING_ADDON_NAME = addon_consts.get( - "CONST_MONITORING_ADDON_NAME") - - # Due to SPN replication latency, we do a few retries here - max_retry = 30 - error_msg = "" - for _ in range(0, max_retry): - try: - if self.context.get_intermediate("monitoring") and self.context.get_enable_msi_auth_for_monitoring(): - # Create the DCR Association here - ensure_container_insights_for_monitoring( - self.cmd, - mc.addon_profiles[CONST_MONITORING_ADDON_NAME], - self.context.get_subscription_id(), - self.context.get_resource_group_name(), - self.context.get_name(), - self.context.get_location(), - remove_monitoring=False, - aad_route=self.context.get_enable_msi_auth_for_monitoring(), - create_dcr=False, - create_dcra=True, - ) - return created_cluster - # CloudError was raised before, but since the adoption of track 2 SDK, - # HttpResponseError would be raised instead - except (CloudError, HttpResponseError) as ex: - error_msg = str(ex) - if 'not found in Active Directory tenant' in 
ex.message: - time.sleep(3) - else: - raise ex - raise AzCLIError("Maximum number of retries exceeded. " + error_msg) - - -class AKSPreviewUpdateDecorator(AKSUpdateDecorator): - # pylint: disable=super-init-not-called - def __init__( - self, - cmd: AzCliCommand, - client: ContainerServiceClient, - raw_parameters: Dict, - resource_type: ResourceType, - ): - """Internal controller of aks_update in aks-preview. - - Break down the all-in-one aks_update function into several relatively independent functions (some of them have - a certain order dependency) that only focus on a specific profile or process a specific piece of logic. - In addition, an overall control function is provided. By calling the aforementioned independent functions one - by one, a complete ManagedCluster object is gradually updated and finally requests are sent to update an - existing cluster. - """ - self.cmd = cmd - self.client = client - self.models = AKSPreviewModels(cmd, resource_type) - # store the context in the process of assemble the ManagedCluster object - self.context = AKSPreviewContext( - cmd, - raw_parameters, - self.models, - decorator_mode=DecoratorMode.UPDATE, - ) - - def check_raw_parameters(self): - """Helper function to check whether any parameters are set. - - Note: Overwritten in aks-preview to use different hard-coded error message. - - If the values of all the parameters are the default values, the command execution will be terminated early and - raise a RequiredArgumentMissingError. Neither the request to fetch or update the ManagedCluster object will be - sent. 
- - :return: None - """ - # exclude some irrelevant or mandatory parameters - excluded_keys = ("cmd", "client", "resource_group_name", "name") - # check whether the remaining parameters are set - # the default value None or False (and other empty values, like empty string) will be considered as not set - is_changed = any( - v for k, v in self.context.raw_param.items() if k not in excluded_keys) - - # special cases - # some parameters support the use of empty string or dictionary to update/remove previously set values - is_default = ( - self.context.get_cluster_autoscaler_profile() is None and - self.context.get_api_server_authorized_ip_ranges() is None and - self.context.get_nodepool_labels() is None - ) - - if not is_changed and is_default: - reconcilePrompt = 'no argument specified to update would you like to reconcile to current settings?' - if not prompt_y_n(reconcilePrompt, default="n"): - # Note: Uncomment the followings to automatically generate the error message. - # option_names = [ - # '"{}"'.format(format_parameter_name_to_option_name(x)) - # for x in self.context.raw_param.keys() - # if x not in excluded_keys - # ] - # error_msg = "Please specify one or more of {}.".format( - # " or ".join(option_names) - # ) - # raise RequiredArgumentMissingError(error_msg) - raise RequiredArgumentMissingError( - 'Please specify "--enable-cluster-autoscaler" or ' - '"--disable-cluster-autoscaler" or ' - '"--update-cluster-autoscaler" or ' - '"--cluster-autoscaler-profile" or ' - '"--enable-pod-security-policy" or ' - '"--disable-pod-security-policy" or ' - '"--api-server-authorized-ip-ranges" or ' - '"--attach-acr" or ' - '"--detach-acr" or ' - '"--uptime-sla" or ' - '"--no-uptime-sla" or ' - '"--load-balancer-managed-outbound-ip-count" or ' - '"--load-balancer-outbound-ips" or ' - '"--load-balancer-outbound-ip-prefixes" or ' - '"--nat-gateway-managed-outbound-ip-count" or ' - '"--nat-gateway-idle-timeout" or ' - '"--enable-aad" or ' - '"--aad-tenant-id" or ' - 
'"--aad-admin-group-object-ids" or ' - '"--enable-ahub" or ' - '"--disable-ahub" or ' - '"--enable-managed-identity" or ' - '"--enable-pod-identity" or ' - '"--disable-pod-identity" or ' - '"--auto-upgrade-channel" or ' - '"--enable-secret-rotation" or ' - '"--disable-secret-rotation" or ' - '"--rotation-poll-interval" or ' - '"--tags" or ' - '"--windows-admin-password" or ' - '"--enable-azure-rbac" or ' - '"--disable-azure-rbac" or ' - '"--enable-local-accounts" or ' - '"--disable-local-accounts" or ' - '"--enable-public-fqdn" or ' - '"--disable-public-fqdn"' - '"--enable-windows-gmsa" or ' - '"--nodepool-labels" or ' - '"--enable-oidc-issuer" or ' - '"--http-proxy-config" or ' - '"--enable-disk-driver" or ' - '"--disk-driver-version" or ' - '"--disable-disk-driver" or ' - '"--enable-file-driver" or ' - '"--disable-file-driver" or ' - '"--enable-snapshot-controller" or ' - '"--disable-snapshot-controller" or ' - '"--enable-azure-keyvault-kms" or ' - '"--enable-workload-identity" or ' - '"--disable-workload-identity" or ' - '"--enable-keda" or ' - '"--disable-keda".' - ) - - def update_load_balancer_profile(self, mc: ManagedCluster) -> ManagedCluster: - """Update load balancer profile for the ManagedCluster object. - - Note: Overwritten in aks-preview to set dual stack related properties. - - :return: the ManagedCluster object - """ - self._ensure_mc(mc) - - if not mc.network_profile: - raise UnknownError( - "Unexpectedly get an empty network profile in the process of updating load balancer profile." - ) - - # In the internal function "_update_load_balancer_profile", it will check whether the provided parameters - # have been assigned, and if there are any, the corresponding profile will be modified; otherwise, it will - # remain unchanged. 
- mc.network_profile.load_balancer_profile = _update_load_balancer_profile( - managed_outbound_ip_count=self.context.get_load_balancer_managed_outbound_ip_count(), - managed_outbound_ipv6_count=self.context.get_load_balancer_managed_outbound_ipv6_count(), - outbound_ips=self.context.get_load_balancer_outbound_ips(), - outbound_ip_prefixes=self.context.get_load_balancer_outbound_ip_prefixes(), - outbound_ports=self.context.get_load_balancer_outbound_ports(), - idle_timeout=self.context.get_load_balancer_idle_timeout(), - profile=mc.network_profile.load_balancer_profile, - models=self.models.lb_models, - ) - return mc - - def update_pod_security_policy(self, mc: ManagedCluster) -> ManagedCluster: - """Update pod security policy for the ManagedCluster object. - - :return: the ManagedCluster object - """ - self._ensure_mc(mc) - - if self.context.get_enable_pod_security_policy(): - mc.enable_pod_security_policy = True - - if self.context.get_disable_pod_security_policy(): - mc.enable_pod_security_policy = False - return mc - - def update_http_proxy_config(self, mc: ManagedCluster) -> ManagedCluster: - """Set up http proxy config for the ManagedCluster object. - - :return: the ManagedCluster object - """ - self._ensure_mc(mc) - - mc.http_proxy_config = self.context.get_http_proxy_config() - return mc - - def update_windows_profile(self, mc: ManagedCluster) -> ManagedCluster: - """Update windows profile for the ManagedCluster object. - - Note: Inherited and extended in aks-preview to set gmsa related properties. - - :return: the ManagedCluster object - """ - mc = super().update_windows_profile(mc) - windows_profile = mc.windows_profile - - if self.context.get_enable_windows_gmsa(): - if not windows_profile: - raise UnknownError( - "Encounter an unexpected error while getting windows profile " - "from the cluster in the process of update." 
- ) - gmsa_dns_server, gmsa_root_domain_name = self.context.get_gmsa_dns_server_and_root_domain_name() - windows_profile.gmsa_profile = self.models.WindowsGmsaProfile( - enabled=True, - dns_server=gmsa_dns_server, - root_domain_name=gmsa_root_domain_name, - ) - return mc - - # TODO: may combine this with update_load_balancer_profile - def update_nat_gateway_profile(self, mc: ManagedCluster) -> ManagedCluster: - """Update nat gateway profile for the ManagedCluster object. - - :return: the ManagedCluster object - """ - self._ensure_mc(mc) - - nat_gateway_managed_outbound_ip_count = self.context.get_nat_gateway_managed_outbound_ip_count() - nat_gateway_idle_timeout = self.context.get_nat_gateway_idle_timeout() - if is_nat_gateway_profile_provided(nat_gateway_managed_outbound_ip_count, nat_gateway_idle_timeout): - if not mc.network_profile: - raise UnknownError( - "Unexpectedly get an empty network profile in the process of updating nat gateway profile." - ) - - mc.network_profile.nat_gateway_profile = _update_nat_gateway_profile( - nat_gateway_managed_outbound_ip_count, - nat_gateway_idle_timeout, - mc.network_profile.nat_gateway_profile, - models=self.models.nat_gateway_models, - ) - return mc - - def update_pod_identity_profile(self, mc: ManagedCluster) -> ManagedCluster: - """Update pod identity profile for the ManagedCluster object. 
- - :return: the ManagedCluster object - """ - self._ensure_mc(mc) - - if self.context.get_enable_pod_identity(): - if not _is_pod_identity_addon_enabled(mc): - # we only rebuild the pod identity profile if it's disabled before - _update_addon_pod_identity( - mc, - enable=True, - allow_kubenet_consent=self.context.get_enable_pod_identity_with_kubenet(), - models=self.models.pod_identity_models - ) - - if self.context.get_disable_pod_identity(): - _update_addon_pod_identity( - mc, enable=False, models=self.models.pod_identity_models) - return mc - - def update_oidc_issuer_profile(self, mc: ManagedCluster) -> ManagedCluster: - """Update OIDC issuer profile for the ManagedCluster object. - - :return: the ManagedCluster object - """ - self._ensure_mc(mc) - - mc.oidc_issuer_profile = self.context.get_oidc_issuer_profile() - - return mc - - def update_workload_identity_profile(self, mc: ManagedCluster) -> ManagedCluster: - """Update workload identity profile for the ManagedCluster object. - - :return: the ManagedCluster object - """ - self._ensure_mc(mc) - - profile = self.context.get_workload_identity_profile() - if profile is None: - if mc.security_profile is not None: - # set the value to None to let server side to fill in the default value - mc.security_profile.workload_identity = None - return mc - - if mc.security_profile is None: - mc.security_profile = self.models.ManagedClusterSecurityProfile() - mc.security_profile.workload_identity = profile - - return mc - - def update_azure_keyvault_kms(self, mc: ManagedCluster) -> ManagedCluster: - """Update security profile azureKeyvaultKms for the ManagedCluster object. 
- - :return: the ManagedCluster object - """ - self._ensure_mc(mc) - - if self.context.get_enable_azure_keyvault_kms(): - key_id = self.context.get_azure_keyvault_kms_key_id() - if key_id: - if mc.security_profile is None: - mc.security_profile = self.models.ManagedClusterSecurityProfile() - mc.security_profile.azure_key_vault_kms = self.models.AzureKeyVaultKms( - enabled=True, - key_id=key_id, - ) - key_vault_network_access = self.context.get_azure_keyvault_kms_key_vault_network_access() - mc.security_profile.azure_key_vault_kms.key_vault_network_access = key_vault_network_access - if key_vault_network_access == CONST_AZURE_KEYVAULT_NETWORK_ACCESS_PRIVATE: - mc.security_profile.azure_key_vault_kms.key_vault_resource_id = self.context.get_azure_keyvault_kms_key_vault_resource_id() - - return mc - - def update_storage_profile(self, mc: ManagedCluster) -> ManagedCluster: - """Update storage profile for the ManagedCluster object. - - :return: the ManagedCluster object - """ - self._ensure_mc(mc) - - mc.storage_profile = self.context.get_storage_profile() - - return mc - - def update_identity_profile(self, mc: ManagedCluster) -> ManagedCluster: - """Update identity profile for the ManagedCluster object. 
- - :return: the ManagedCluster object - """ - self._ensure_mc(mc) - - assign_kubelet_identity = self.context.get_updated_assign_kubelet_identity() - if assign_kubelet_identity: - identity_profile = { - 'kubeletidentity': self.models.UserAssignedIdentity( - resource_id=assign_kubelet_identity, - ) - } - cluster_identity_object_id = self.context.get_cluster_uaidentity_object_id() - # ensure the cluster identity has "Managed Identity Operator" role at the scope of kubelet identity - _ensure_cluster_identity_permission_on_kubelet_identity( - self.cmd.cli_ctx, - cluster_identity_object_id, - assign_kubelet_identity) - mc.identity_profile = identity_profile - return mc - - def update_api_server_access_profile(self, mc: ManagedCluster) -> ManagedCluster: - """Update apiServerAccessProfile vnet integration related property for the ManagedCluster object. - - :return: the ManagedCluster object - """ - mc = super().update_api_server_access_profile(mc) - if self.context.get_enable_apiserver_vnet_integration(): - if mc.api_server_access_profile is None: - mc.api_server_access_profile = self.models.ManagedClusterAPIServerAccessProfile() - mc.api_server_access_profile.enable_vnet_integration = True - if self.context.get_apiserver_subnet_id(): - mc.api_server_access_profile.subnet_id = self.context.get_apiserver_subnet_id() - - return mc - - def update_workload_auto_scaler_profile(self, mc: ManagedCluster) -> ManagedCluster: - """Update workload auto-scaler profile for the ManagedCluster object. 
- - :return: the ManagedCluster object - """ - self._ensure_mc(mc) - - if self.context.get_enable_keda(): - if mc.workload_auto_scaler_profile is None: - mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile() - mc.workload_auto_scaler_profile.keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(enabled=True) - - if self.context.get_disable_keda(): - if mc.workload_auto_scaler_profile is None: - mc.workload_auto_scaler_profile = self.models.ManagedClusterWorkloadAutoScalerProfile() - mc.workload_auto_scaler_profile.keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(enabled=False) - - return mc - - def patch_mc(self, mc: ManagedCluster) -> ManagedCluster: - """Helper function to patch the ManagedCluster object. - - This is a collection of workarounds on the cli side before fixing the problems on the rp side. - - :return: the ManagedCluster object - """ - self._ensure_mc(mc) - - # fill default values for pod labels in pod identity exceptions - _fill_defaults_for_pod_identity_profile(mc.pod_identity_profile) - return mc - - def update_mc_preview_profile(self) -> ManagedCluster: - """The overall controller used to update the preview ManagedCluster profile. - - The completely updated ManagedCluster object will later be passed as a parameter to the underlying SDK - (mgmt-containerservice) to send the actual request. - - :return: the ManagedCluster object - """ - # update the default ManagedCluster profile - mc = self.update_default_mc_profile() - # patch mc - mc = self.patch_mc(mc) - # update pod security policy - mc = self.update_pod_security_policy(mc) - # update nat gateway profile - mc = self.update_nat_gateway_profile(mc) - # update pod identity profile - mc = self.update_pod_identity_profile(mc) - - # update workload identity & OIDC issuer settings - # NOTE: in current implementation, workload identity settings setup requires checking - # previous OIDC issuer profile. 
However, the OIDC issuer settings setup will - # overrides the previous OIDC issuer profile based on user input. Therefore, we have - # to make sure the workload identity settings setup is done after OIDC issuer settings. - mc = self.update_workload_identity_profile(mc) - mc = self.update_oidc_issuer_profile(mc) - - mc = self.update_http_proxy_config(mc) - mc = self.update_azure_keyvault_kms(mc) - # update identity profile - mc = self.update_identity_profile(mc) - - mc = self.update_storage_profile(mc) - - mc = self.update_workload_auto_scaler_profile(mc) - - return mc - - def update_mc_preview(self, mc: ManagedCluster) -> ManagedCluster: - """Send request to update the existing managed cluster. - - :return: the ManagedCluster object - """ - return super().update_mc(mc) diff --git a/src/aks-preview/azext_aks_preview/tests/latest/test_aks_diagnostics.py b/src/aks-preview/azext_aks_preview/tests/latest/test_aks_diagnostics.py index bbd75cbd16e..4d351a568c8 100644 --- a/src/aks-preview/azext_aks_preview/tests/latest/test_aks_diagnostics.py +++ b/src/aks-preview/azext_aks_preview/tests/latest/test_aks_diagnostics.py @@ -25,3 +25,7 @@ def test_generate_container_name_not_containing_hcp(self): expected_container_name = 'abcdef-dns-ed55ba6d-e48fe2bd-b4bc-4aac-bc23-29bc44154fe1-privat' trim_container_name = commands._generate_container_name(None, private_fqdn) self.assertEqual(expected_container_name, trim_container_name) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/aks-preview/azext_aks_preview/tests/latest/test_decorator.py b/src/aks-preview/azext_aks_preview/tests/latest/test_decorator.py deleted file mode 100644 index 76ac1f64af3..00000000000 --- a/src/aks-preview/azext_aks_preview/tests/latest/test_decorator.py +++ /dev/null @@ -1,5716 +0,0 @@ -# -------------------------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for license information. -# -------------------------------------------------------------------------------------------- - -import importlib -import unittest -from unittest.mock import Mock, patch - -from azext_aks_preview.__init__ import register_aks_preview_resource_type -from azext_aks_preview._client_factory import CUSTOM_MGMT_AKS_PREVIEW -from azext_aks_preview._consts import ( - ADDONS, - CONST_ACC_SGX_QUOTE_HELPER_ENABLED, - CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME, - CONST_AZURE_POLICY_ADDON_NAME, - CONST_CONFCOM_ADDON_NAME, - CONST_GITOPS_ADDON_NAME, - CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME, - CONST_INGRESS_APPGW_ADDON_NAME, - CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID, - CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME, - CONST_INGRESS_APPGW_SUBNET_CIDR, - CONST_INGRESS_APPGW_SUBNET_ID, - CONST_INGRESS_APPGW_WATCH_NAMESPACE, - CONST_KUBE_DASHBOARD_ADDON_NAME, - CONST_MONITORING_ADDON_NAME, - CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID, - CONST_MONITORING_USING_AAD_MSI_AUTH, - CONST_OPEN_SERVICE_MESH_ADDON_NAME, - CONST_OUTBOUND_TYPE_MANAGED_NAT_GATEWAY, - CONST_OUTBOUND_TYPE_USER_ASSIGNED_NAT_GATEWAY, - CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING, - CONST_ROTATION_POLL_INTERVAL, - CONST_SECRET_ROTATION_ENABLED, - CONST_VIRTUAL_NODE_ADDON_NAME, - CONST_VIRTUAL_NODE_SUBNET_NAME, -) -from azext_aks_preview.decorator import ( - AKSPreviewContext, - AKSPreviewCreateDecorator, - AKSPreviewModels, - AKSPreviewUpdateDecorator, -) -from azext_aks_preview.tests.latest.mocks import MockCLI, MockClient, MockCmd -from azext_aks_preview.tests.latest.test_aks_commands import _get_test_data_file -from azure.cli.command_modules.acs._consts import ( - DecoratorEarlyExitException, - DecoratorMode, -) -from azure.cli.core.azclierror import ( - ArgumentUsageError, - AzCLIError, - CLIInternalError, - InvalidArgumentValueError, - MutuallyExclusiveArgumentError, - RequiredArgumentMissingError, - UnknownError, -) -from knack.util 
import CLIError -from knack.prompting import NoTTYException -from azure.core.exceptions import HttpResponseError -from msrestazure.azure_exceptions import CloudError - - -class AKSPreviewModelsTestCase(unittest.TestCase): - def setUp(self): - # manually register CUSTOM_MGMT_AKS_PREVIEW - register_aks_preview_resource_type() - self.cli_ctx = MockCLI() - self.cmd = MockCmd(self.cli_ctx) - - def test_models(self): - models = AKSPreviewModels(self.cmd, CUSTOM_MGMT_AKS_PREVIEW) - - # load models directly (instead of through the `get_sdk` method provided by the cli component) - from azure.cli.core.profiles._shared import AZURE_API_PROFILES - - sdk_profile = AZURE_API_PROFILES["latest"][CUSTOM_MGMT_AKS_PREVIEW] - api_version = sdk_profile.default_api_version - module_name = "azext_aks_preview.vendored_sdks.azure_mgmt_preview_aks.v{}.models".format( - api_version.replace("-", "_") - ) - module = importlib.import_module(module_name) - - self.assertEqual(models.KubeletConfig, - getattr(module, "KubeletConfig")) - self.assertEqual(models.LinuxOSConfig, - getattr(module, "LinuxOSConfig")) - self.assertEqual( - models.ManagedClusterHTTPProxyConfig, - getattr(module, "ManagedClusterHTTPProxyConfig"), - ) - self.assertEqual( - models.WindowsGmsaProfile, getattr(module, "WindowsGmsaProfile") - ) - self.assertEqual(models.CreationData, getattr(module, "CreationData")) - # nat gateway models - self.assertEqual( - models.nat_gateway_models.ManagedClusterNATGatewayProfile, - getattr(module, "ManagedClusterNATGatewayProfile"), - ) - self.assertEqual( - models.nat_gateway_models.ManagedClusterManagedOutboundIPProfile, - getattr(module, "ManagedClusterManagedOutboundIPProfile"), - ) - # pod identity models - self.assertEqual( - models.pod_identity_models.ManagedClusterPodIdentityProfile, - getattr(module, "ManagedClusterPodIdentityProfile"), - ) - self.assertEqual( - models.pod_identity_models.ManagedClusterPodIdentityException, - getattr(module, "ManagedClusterPodIdentityException"), - 
) - - # workload auto-scaler profile models - self.assertEqual( - models.ManagedClusterWorkloadAutoScalerProfile, - getattr(module, "ManagedClusterWorkloadAutoScalerProfile") - ) - - self.assertEqual( - models.ManagedClusterWorkloadAutoScalerProfileKeda, - getattr(module, "ManagedClusterWorkloadAutoScalerProfileKeda") - ) - - -class AKSPreviewContextTestCase(unittest.TestCase): - def setUp(self): - # manually register CUSTOM_MGMT_AKS_PREVIEW - register_aks_preview_resource_type() - self.cli_ctx = MockCLI() - self.cmd = MockCmd(self.cli_ctx) - self.models = AKSPreviewModels(self.cmd, CUSTOM_MGMT_AKS_PREVIEW) - - def test_validate_pod_identity_with_kubenet(self): - # custom value - ctx_1 = AKSPreviewContext( - self.cmd, - {}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - network_profile_1 = self.models.ContainerServiceNetworkProfile( - network_plugin="kubenet" - ) - mc_1 = self.models.ManagedCluster( - location="test_location", - network_profile=network_profile_1, - ) - # fail on enable_pod_identity_with_kubenet not specified - with self.assertRaises(RequiredArgumentMissingError): - ctx_1._AKSPreviewContext__validate_pod_identity_with_kubenet( - mc_1, True, False - ) - - def test_get_vm_set_type(self): - # default & dynamic completion - ctx_1 = AKSPreviewContext( - self.cmd, - { - "vm_set_type": None, - "kubernetes_version": "", - "enable_vmss": False, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1._get_vm_set_type(read_only=True), None) - self.assertEqual(ctx_1.get_vm_set_type(), "VirtualMachineScaleSets") - agent_pool_profile = self.models.ManagedClusterAgentPoolProfile( - name="test_ap_name", type="test_mc_vm_set_type" - ) - mc = self.models.ManagedCluster( - location="test_location", agent_pool_profiles=[agent_pool_profile] - ) - ctx_1.attach_mc(mc) - self.assertEqual(ctx_1.get_vm_set_type(), "test_mc_vm_set_type") - - # custom value & dynamic completion - ctx_2 = AKSPreviewContext( - self.cmd, - { - 
"vm_set_type": "availabilityset", - "kubernetes_version": "", - "enable_vmss": True, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - # fail on invalid vm_set_type when enable_vmss is specified - with self.assertRaises(InvalidArgumentValueError): - self.assertEqual(ctx_2.get_vm_set_type(), "AvailabilitySet") - - # custom value & dynamic completion - ctx_3 = AKSPreviewContext( - self.cmd, - { - "vm_set_type": None, - "kubernetes_version": "", - "enable_vmss": True, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - # fail on invalid vm_set_type when enable_vmss is specified - self.assertEqual(ctx_3.get_vm_set_type(), "VirtualMachineScaleSets") - - def test_get_zones(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - {"node_zones": None}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1.get_zones(), None) - agent_pool_profile = self.models.ManagedClusterAgentPoolProfile( - name="test_nodepool_name", - availability_zones=["test_mc_zones1", "test_mc_zones2"], - ) - mc = self.models.ManagedCluster( - location="test_location", agent_pool_profiles=[agent_pool_profile] - ) - ctx_1.attach_mc(mc) - self.assertEqual( - ctx_1.get_zones(), ["test_mc_zones1", "test_mc_zones2"] - ) - - # custom value - ctx_2 = AKSPreviewContext( - self.cmd, - {"node_zones": ["test_zones1", "test_zones2"]}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_2.get_zones(), ["test_zones1", "test_zones2"]) - - def test_get_pod_subnet_id(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - {"pod_subnet_id": None}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1.get_pod_subnet_id(), None) - agent_pool_profile = self.models.ManagedClusterAgentPoolProfile( - name="test_nodepool_name", pod_subnet_id="test_mc_pod_subnet_id" - ) - mc = self.models.ManagedCluster( - location="test_location", agent_pool_profiles=[agent_pool_profile] - ) - ctx_1.attach_mc(mc) - 
self.assertEqual(ctx_1.get_pod_subnet_id(), "test_mc_pod_subnet_id") - - def test_get_pod_cidrs(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - {"pod_cidrs": "10.244.0.0/16,2001:abcd::/64"}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual( - ctx_1.get_pod_cidrs(), ["10.244.0.0/16", "2001:abcd::/64"] - ) - - ctx_2 = AKSPreviewContext( - self.cmd, - {"pod_cidrs": ""}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_2.get_pod_cidrs(), []) - - ctx_3 = AKSPreviewContext( - self.cmd, - {"pod_cidrs": None}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_3.get_pod_cidrs(), None) - - def test_get_service_cidrs(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - {"service_cidrs": "10.244.0.0/16,2001:abcd::/64"}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual( - ctx_1.get_service_cidrs(), ["10.244.0.0/16", "2001:abcd::/64"] - ) - - ctx_2 = AKSPreviewContext( - self.cmd, - {"service_cidrs": ""}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_2.get_service_cidrs(), []) - - ctx_3 = AKSPreviewContext( - self.cmd, - {"service_cidrs": None}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_3.get_service_cidrs(), None) - - def test_get_ip_families(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - {"ip_families": "IPv4,IPv6"}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1.get_ip_families(), ["IPv4", "IPv6"]) - - ctx_2 = AKSPreviewContext( - self.cmd, - {"ip_families": ""}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_2.get_ip_families(), []) - - ctx_3 = AKSPreviewContext( - self.cmd, - {"ip_families": None}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_3.get_ip_families(), None) - - def test_get_load_balancer_managed_outbound_ip_count(self): - # default 
- ctx_1 = AKSPreviewContext( - self.cmd, - { - "load_balancer_managed_outbound_ip_count": None, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual( - ctx_1.get_load_balancer_managed_outbound_ip_count(), None - ) - load_balancer_profile = self.models.lb_models.get( - "ManagedClusterLoadBalancerProfile" - )( - managed_outbound_i_ps=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfileManagedOutboundIPs" - )(count=10) - ) - network_profile = self.models.ContainerServiceNetworkProfile( - load_balancer_profile=load_balancer_profile - ) - mc = self.models.ManagedCluster( - location="test_location", network_profile=network_profile - ) - ctx_1.attach_mc(mc) - self.assertEqual( - ctx_1.get_load_balancer_managed_outbound_ip_count(), 10 - ) - - # custom value - ctx_2 = AKSPreviewContext( - self.cmd, - { - "load_balancer_managed_outbound_ip_count": None, - "load_balancer_outbound_ips": None, - "load_balancer_outbound_ip_prefixes": None, - }, - self.models, - decorator_mode=DecoratorMode.UPDATE, - ) - load_balancer_profile_2 = self.models.lb_models.get( - "ManagedClusterLoadBalancerProfile" - )( - managed_outbound_i_ps=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfileManagedOutboundIPs" - )(count=10, count_ipv6=20), - outbound_i_ps=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfileOutboundIPs" - )( - public_i_ps=[ - self.models.lb_models.get("ResourceReference")( - id="test_public_ip" - ) - ] - ), - outbound_ip_prefixes=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfileOutboundIPPrefixes" - )( - public_ip_prefixes=[ - self.models.lb_models.get("ResourceReference")( - id="test_public_ip_prefix" - ) - ] - ), - ) - network_profile_2 = self.models.ContainerServiceNetworkProfile( - load_balancer_profile=load_balancer_profile_2 - ) - mc_2 = self.models.ManagedCluster( - location="test_location", network_profile=network_profile_2 - ) - ctx_2.attach_mc(mc_2) - self.assertEqual( - 
ctx_2.get_load_balancer_managed_outbound_ip_count(), 10 - ) - - def test_get_load_balancer_managed_outbound_ipv6_count(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - { - "load_balancer_managed_outbound_ipv6_count": None, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual( - ctx_1.get_load_balancer_managed_outbound_ipv6_count(), None - ) - load_balancer_profile = self.models.lb_models.get( - "ManagedClusterLoadBalancerProfile" - )( - managed_outbound_i_ps=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfileManagedOutboundIPs" - )(count_ipv6=10) - ) - network_profile = self.models.ContainerServiceNetworkProfile( - load_balancer_profile=load_balancer_profile - ) - mc = self.models.ManagedCluster( - location="test_location", network_profile=network_profile - ) - ctx_1.attach_mc(mc) - self.assertEqual( - ctx_1.get_load_balancer_managed_outbound_ipv6_count(), 10 - ) - - # custom value - ctx_2 = AKSPreviewContext( - self.cmd, - {"load_balancer_managed_outbound_ipv6_count": 0}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual( - ctx_2.get_load_balancer_managed_outbound_ipv6_count(), 0 - ) - - # custom value - ctx_3 = AKSPreviewContext( - self.cmd, - { - "load_balancer_managed_outbound_ipv6_count": None, - "load_balancer_outbound_ips": None, - "load_balancer_outbound_ip_prefixes": None, - }, - self.models, - decorator_mode=DecoratorMode.UPDATE, - ) - load_balancer_profile_3 = self.models.lb_models.get( - "ManagedClusterLoadBalancerProfile" - )( - managed_outbound_i_ps=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfileManagedOutboundIPs" - )(count=10, count_ipv6=20), - outbound_i_ps=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfileOutboundIPs" - )( - public_i_ps=[ - self.models.lb_models.get("ResourceReference")( - id="test_public_ip" - ) - ] - ), - outbound_ip_prefixes=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfileOutboundIPPrefixes" - )( - 
public_ip_prefixes=[ - self.models.lb_models.get("ResourceReference")( - id="test_public_ip_prefix" - ) - ] - ), - ) - network_profile_3 = self.models.ContainerServiceNetworkProfile( - load_balancer_profile=load_balancer_profile_3 - ) - mc_3 = self.models.ManagedCluster( - location="test_location", network_profile=network_profile_3 - ) - ctx_3.attach_mc(mc_3) - self.assertEqual( - ctx_3.get_load_balancer_managed_outbound_ipv6_count(), 20 - ) - - def test_get_enable_fips_image(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - {"enable_fips_image": False}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1.get_enable_fips_image(), False) - agent_pool_profile = self.models.ManagedClusterAgentPoolProfile( - name="test_nodepool_name", - enable_fips=True, - ) - mc = self.models.ManagedCluster( - location="test_location", agent_pool_profiles=[agent_pool_profile] - ) - ctx_1.attach_mc(mc) - self.assertEqual(ctx_1.get_enable_fips_image(), True) - - def test_get_workload_runtime(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - {"workload_runtime": None}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1.get_workload_runtime(), None) - agent_pool_profile = self.models.ManagedClusterAgentPoolProfile( - name="test_nodepool_name", - workload_runtime="test_mc_workload_runtime", - ) - mc = self.models.ManagedCluster( - location="test_location", agent_pool_profiles=[agent_pool_profile] - ) - ctx_1.attach_mc(mc) - self.assertEqual( - ctx_1.get_workload_runtime(), "test_mc_workload_runtime" - ) - - def test_get_gpu_instance_profile(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - {"gpu_instance_profile": None}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1.get_gpu_instance_profile(), None) - agent_pool_profile = self.models.ManagedClusterAgentPoolProfile( - name="test_nodepool_name", - gpu_instance_profile="test_mc_gpu_instance_profile", - ) - mc 
= self.models.ManagedCluster( - location="test_location", agent_pool_profiles=[agent_pool_profile] - ) - ctx_1.attach_mc(mc) - self.assertEqual( - ctx_1.get_gpu_instance_profile(), "test_mc_gpu_instance_profile" - ) - - def test_get_message_of_the_day(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - {"message_of_the_day": None}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1.get_message_of_the_day(), None) - agent_pool_profile = self.models.ManagedClusterAgentPoolProfile( - name="test_nodepool_name", - message_of_the_day="test_mc_message_of_the_day", - ) - mc = self.models.ManagedCluster( - location="test_location", agent_pool_profiles=[agent_pool_profile] - ) - ctx_1.attach_mc(mc) - self.assertEqual( - ctx_1.get_message_of_the_day(), "test_mc_message_of_the_day" - ) - - # custom - ctx_2 = AKSPreviewContext( - self.cmd, - {"message_of_the_day": "fake-path"}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - # fail on invalid file path - with self.assertRaises(InvalidArgumentValueError): - ctx_2.get_message_of_the_day() - - # custom - ctx_3 = AKSPreviewContext( - self.cmd, - {"message_of_the_day": _get_test_data_file("invalidconfig.json")}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_3.get_message_of_the_day(), "W10=") - - def test_get_enable_custom_ca_trust(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - {"enable_custom_ca_trust": None}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1.get_enable_custom_ca_trust(), None) - agent_pool_profile = self.models.ManagedClusterAgentPoolProfile( - name="test_nodepool_name", - enable_custom_ca_trust=True, - ) - mc = self.models.ManagedCluster( - location="test_location", agent_pool_profiles=[agent_pool_profile] - ) - ctx_1.attach_mc(mc) - self.assertEqual( - ctx_1.get_enable_custom_ca_trust(), True - ) - - def test_get_kubelet_config(self): - # default - ctx_1 = AKSPreviewContext( - 
self.cmd, - {"kubelet_config": None}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1.get_kubelet_config(), None) - agent_pool_profile = self.models.ManagedClusterAgentPoolProfile( - name="test_nodepool_name", - kubelet_config=self.models.KubeletConfig(pod_max_pids=100), - ) - mc = self.models.ManagedCluster( - location="test_location", agent_pool_profiles=[agent_pool_profile] - ) - ctx_1.attach_mc(mc) - self.assertEqual( - ctx_1.get_kubelet_config(), - self.models.KubeletConfig(pod_max_pids=100), - ) - - # custom value - ctx_2 = AKSPreviewContext( - self.cmd, - {"kubelet_config": "fake-path"}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - # fail on invalid file path - with self.assertRaises(InvalidArgumentValueError): - ctx_2.get_kubelet_config() - - # custom value - ctx_3 = AKSPreviewContext( - self.cmd, - {"kubelet_config": _get_test_data_file("invalidconfig.json")}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - # fail on invalid file content - with self.assertRaises(InvalidArgumentValueError): - ctx_3.get_kubelet_config() - - def test_get_linux_os_config(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - {"linux_os_config": None}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1.get_linux_os_config(), None) - agent_pool_profile = self.models.ManagedClusterAgentPoolProfile( - name="test_nodepool_name", - linux_os_config=self.models.LinuxOSConfig(swap_file_size_mb=200), - ) - mc = self.models.ManagedCluster( - location="test_location", agent_pool_profiles=[agent_pool_profile] - ) - ctx_1.attach_mc(mc) - self.assertEqual( - ctx_1.get_linux_os_config(), - self.models.LinuxOSConfig(swap_file_size_mb=200), - ) - - # custom value - ctx_2 = AKSPreviewContext( - self.cmd, - {"linux_os_config": "fake-path"}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - # fail on invalid file path - with self.assertRaises(InvalidArgumentValueError): - 
ctx_2.get_linux_os_config() - - # custom value - ctx_3 = AKSPreviewContext( - self.cmd, - {"linux_os_config": _get_test_data_file("invalidconfig.json")}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - # fail on invalid file content - with self.assertRaises(InvalidArgumentValueError): - ctx_3.get_linux_os_config() - - def test_get_http_proxy_config(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - {"http_proxy_config": None}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1.get_http_proxy_config(), None) - mc = self.models.ManagedCluster( - location="test_location", - http_proxy_config=self.models.ManagedClusterHTTPProxyConfig( - http_proxy="test_http_proxy" - ), - ) - ctx_1.attach_mc(mc) - self.assertEqual( - ctx_1.get_http_proxy_config(), - self.models.ManagedClusterHTTPProxyConfig( - http_proxy="test_http_proxy" - ), - ) - - # custom value - ctx_2 = AKSPreviewContext( - self.cmd, - {"http_proxy_config": "fake-path"}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - # fail on invalid file path - with self.assertRaises(InvalidArgumentValueError): - ctx_2.get_http_proxy_config() - - # custom value - ctx_3 = AKSPreviewContext( - self.cmd, - {"http_proxy_config": _get_test_data_file("invalidconfig.json")}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - # fail on invalid file path - with self.assertRaises(InvalidArgumentValueError): - ctx_3.get_http_proxy_config() - - def test_get_node_resource_group(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - {"node_resource_group": None}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1.get_node_resource_group(), None) - mc = self.models.ManagedCluster( - location="test_location", - node_resource_group="test_node_resource_group", - ) - ctx_1.attach_mc(mc) - self.assertEqual( - ctx_1.get_node_resource_group(), "test_node_resource_group" - ) - - def test_get_nat_gateway_managed_outbound_ip_count(self): 
- # default - ctx_1 = AKSPreviewContext( - self.cmd, - {"nat_gateway_managed_outbound_ip_count": None}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual( - ctx_1.get_nat_gateway_managed_outbound_ip_count(), None - ) - nat_gateway_profile = self.models.nat_gateway_models.ManagedClusterNATGatewayProfile( - managed_outbound_ip_profile=self.models.nat_gateway_models.ManagedClusterManagedOutboundIPProfile( - count=10 - ) - ) - network_profile = self.models.ContainerServiceNetworkProfile( - nat_gateway_profile=nat_gateway_profile - ) - mc = self.models.ManagedCluster( - location="test_location", - network_profile=network_profile, - ) - ctx_1.attach_mc(mc) - self.assertEqual(ctx_1.get_nat_gateway_managed_outbound_ip_count(), 10) - - def test_get_nat_gateway_idle_timeout(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - {"nat_gateway_idle_timeout": None}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1.get_nat_gateway_idle_timeout(), None) - nat_gateway_profile = ( - self.models.nat_gateway_models.ManagedClusterNATGatewayProfile( - idle_timeout_in_minutes=20, - ) - ) - network_profile = self.models.ContainerServiceNetworkProfile( - nat_gateway_profile=nat_gateway_profile - ) - mc = self.models.ManagedCluster( - location="test_location", - network_profile=network_profile, - ) - ctx_1.attach_mc(mc) - self.assertEqual(ctx_1.get_nat_gateway_idle_timeout(), 20) - - def test_get_storage_profile_update(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - {}, - self.models, - decorator_mode=DecoratorMode.UPDATE, - ) - storage_profile = ( - self.models.ManagedClusterStorageProfile( - disk_csi_driver = None, - file_csi_driver = None, - snapshot_controller = None, - ) - ) - mc = self.models.ManagedCluster( - location="test_location", - ) - ctx_1.attach_mc(mc) - self.assertEqual( - ctx_1.get_storage_profile(), storage_profile - ) - - # custom disk value - ctx_2 = AKSPreviewContext( - self.cmd, - { - 
"enable_disk_driver": True, - "disable_disk_driver": True, - "yes": True, - }, - self.models, - decorator_mode=DecoratorMode.UPDATE, - ) - # fail on mutually exclusive enable_disk_driver and disable_disk_driver - with self.assertRaises(MutuallyExclusiveArgumentError): - ctx_2.get_disk_driver() - - # custom file value - ctx_3 = AKSPreviewContext( - self.cmd, - { - "enable_file_driver": True, - "disable_file_driver": True, - }, - self.models, - decorator_mode=DecoratorMode.UPDATE, - ) - # fail on mutually exclusive enable_file_driver and disable_file_driver - with self.assertRaises(MutuallyExclusiveArgumentError): - ctx_3.get_file_driver() - - # custom file value - ctx_4 = AKSPreviewContext( - self.cmd, - { - "enable_snapshot_controller": True, - "disable_snapshot_controller": True, - }, - self.models, - decorator_mode=DecoratorMode.UPDATE, - ) - # fail on mutually exclusive enable_snapshot_controller and disable_snapshot_controller - with self.assertRaises(MutuallyExclusiveArgumentError): - ctx_4.get_snapshot_controller() - - # default with csi driver enabled flag - ctx_5 = AKSPreviewContext( - self.cmd, - { - "enable_disk_driver": True, - "enable_file_driver": True, - "enable_snapshot_controller": True, - }, - self.models, - decorator_mode=DecoratorMode.UPDATE, - ) - storage_profile = ( - self.models.ManagedClusterStorageProfile( - disk_csi_driver = self.models.ManagedClusterStorageProfileDiskCSIDriver( - enabled = True, - ), - file_csi_driver = self.models.ManagedClusterStorageProfileFileCSIDriver( - enabled = True, - ), - snapshot_controller = self.models.ManagedClusterStorageProfileSnapshotController( - enabled = True, - ), - ) - ) - mc = self.models.ManagedCluster( - location="test_location", - ) - ctx_5.attach_mc(mc) - self.assertEqual( - ctx_5.get_storage_profile(), storage_profile - ) - - # disk_driver_version value passed and enable-disk-driver passed - ctx_6 = AKSPreviewContext( - self.cmd, - { - "enable_disk_driver": True, - "disk_driver_version": "v2", - 
"enable_file_driver": True, - "enable_snapshot_controller": True, - }, - self.models, - decorator_mode=DecoratorMode.UPDATE, - ) - storage_profile = ( - self.models.ManagedClusterStorageProfile( - disk_csi_driver = self.models.ManagedClusterStorageProfileDiskCSIDriver( - enabled = True, - version = "v2", - ), - file_csi_driver = self.models.ManagedClusterStorageProfileFileCSIDriver( - enabled = True, - ), - snapshot_controller = self.models.ManagedClusterStorageProfileSnapshotController( - enabled = True, - ), - ) - ) - mc = self.models.ManagedCluster( - location="test_location", - ) - ctx_6.attach_mc(mc) - self.assertEqual( - ctx_6.get_storage_profile(), storage_profile - ) - - # fail with enable-disk-driver as false and value passed for disk_driver_version - ctx_8 = AKSPreviewContext( - self.cmd, - { - "disable_disk_driver": True, - "disk_driver_version": "v2", - }, - self.models, - decorator_mode=DecoratorMode.UPDATE, - ) - - # fail on argument usage error - with self.assertRaises(ArgumentUsageError): - ctx_8.get_disk_driver() - - # enable-disk-driver, disk_driver_version, enable_file_driver and enable_snapshot_controller passed - ctx_9 = AKSPreviewContext( - self.cmd, - { - "enable_disk_driver": True, - "disk_driver_version": "v2", - "disable_file_driver": True, - "disable_snapshot_controller": True, - "yes": True, - }, - self.models, - decorator_mode=DecoratorMode.UPDATE, - ) - storage_profile = ( - self.models.ManagedClusterStorageProfile( - disk_csi_driver = self.models.ManagedClusterStorageProfileDiskCSIDriver( - enabled = True, - version = "v2", - ), - file_csi_driver = self.models.ManagedClusterStorageProfileFileCSIDriver( - enabled = False, - ), - snapshot_controller = self.models.ManagedClusterStorageProfileSnapshotController( - enabled = False, - ), - ) - ) - mc = self.models.ManagedCluster( - location="test_location", - ) - ctx_9.attach_mc(mc) - self.assertEqual( - ctx_9.get_storage_profile(), storage_profile - ) - - # fail with enable-disk-driver as 
false and value passed for disk_driver_version - ctx_10 = AKSPreviewContext( - self.cmd, - { - "disk_driver_version": "v2", - }, - self.models, - decorator_mode=DecoratorMode.UPDATE, - ) - - # fail on argument usage error - with self.assertRaises(ArgumentUsageError): - ctx_10.get_disk_driver() - - # fail on prompt_y_n not specified when disabling disk driver - ctx_11 = AKSPreviewContext( - self.cmd, - { - "disable_disk_driver": True, - }, - self.models, - decorator_mode=DecoratorMode.UPDATE, - ) - with patch( - "azext_aks_preview.decorator.prompt_y_n", - return_value=False, - ), self.assertRaises(DecoratorEarlyExitException): - ctx_11.get_disk_driver() - - # fail on prompt_y_n not specified when disabling file driver - ctx_12 = AKSPreviewContext( - self.cmd, - { - "disable_file_driver": True, - }, - self.models, - decorator_mode=DecoratorMode.UPDATE, - ) - with patch( - "azext_aks_preview.decorator.prompt_y_n", - return_value=False, - ), self.assertRaises(DecoratorEarlyExitException): - ctx_12.get_file_driver() - - # fail on prompt_y_n not specified when disabling snapshot controller - ctx_13 = AKSPreviewContext( - self.cmd, - { - "disable_snapshot_controller": True, - }, - self.models, - decorator_mode=DecoratorMode.UPDATE, - ) - with patch( - "azext_aks_preview.decorator.prompt_y_n", - return_value=False, - ), self.assertRaises(DecoratorEarlyExitException): - ctx_13.get_snapshot_controller() - - - def test_get_storage_profile_create(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - {}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - storage_profile = ( - self.models.ManagedClusterStorageProfile( - disk_csi_driver = None, - file_csi_driver = None, - snapshot_controller = None, - ) - ) - mc = self.models.ManagedCluster( - location="test_location", - ) - ctx_1.attach_mc(mc) - self.assertEqual( - ctx_1.get_storage_profile(), storage_profile - ) - - # custom disk value - ctx_2 = AKSPreviewContext( - self.cmd, - { - "enable_disk_driver": True, - 
"disable_disk_driver": True, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - # fail on mutually exclusive enable_disk_driver and disable_disk_driver - with self.assertRaises(MutuallyExclusiveArgumentError): - ctx_2.get_disk_driver() - - # custom file value - ctx_3 = AKSPreviewContext( - self.cmd, - { - "enable_file_driver": True, - "disable_file_driver": True, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - # fail on mutually exclusive enable_file_driver and disable_file_driver - with self.assertRaises(MutuallyExclusiveArgumentError): - ctx_3.get_file_driver() - - # custom file value - ctx_4 = AKSPreviewContext( - self.cmd, - { - "enable_snapshot_controller": True, - "disable_snapshot_controller": True, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - # fail on mutually exclusive enable_snapshot_controller and disable_snapshot_controller - with self.assertRaises(MutuallyExclusiveArgumentError): - ctx_4.get_snapshot_controller() - - # default with csi driver enabled flag and no value for disk_driver_version passed - ctx_5 = AKSPreviewContext( - self.cmd, - { - "enable_disk_driver": True, - "enable_file_driver": True, - "enable_snapshot_controller": True, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - storage_profile = ( - self.models.ManagedClusterStorageProfile( - disk_csi_driver = self.models.ManagedClusterStorageProfileDiskCSIDriver( - enabled = True, - version = "v1", - ), - file_csi_driver = self.models.ManagedClusterStorageProfileFileCSIDriver( - enabled = True, - ), - snapshot_controller = self.models.ManagedClusterStorageProfileSnapshotController( - enabled = True, - ), - ) - ) - mc = self.models.ManagedCluster( - location="test_location", - ) - ctx_5.attach_mc(mc) - self.assertEqual( - ctx_5.get_storage_profile(), storage_profile - ) - - # disk_driver_version value passed and enable-disk-driver passed - ctx_6 = AKSPreviewContext( - self.cmd, - { - "enable_disk_driver": True, - 
"disk_driver_version": "v2", - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - storage_profile = ( - self.models.ManagedClusterStorageProfile( - disk_csi_driver = self.models.ManagedClusterStorageProfileDiskCSIDriver( - enabled = True, - version = "v2", - ), - ) - ) - mc = self.models.ManagedCluster( - location="test_location", - ) - ctx_6.attach_mc(mc) - self.assertEqual( - ctx_6.get_storage_profile(), storage_profile - ) - - # fail with enable-disk-driver as false and value passed for disk_driver_version - ctx_8 = AKSPreviewContext( - self.cmd, - { - "disable_disk_driver": True, - "disk_driver_version": "v2", - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - - # fail on argument usage error - with self.assertRaises(ArgumentUsageError): - ctx_8.get_disk_driver() - - # enable-disk-driver, disk_driver_version, enable_file_driver and enable_snapshot_controller passed - ctx_9 = AKSPreviewContext( - self.cmd, - { - "enable_disk_driver": True, - "disk_driver_version": "v2", - "enable_file_driver": True, - "enable_snapshot_controller": True, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - storage_profile = ( - self.models.ManagedClusterStorageProfile( - disk_csi_driver = self.models.ManagedClusterStorageProfileDiskCSIDriver( - enabled = True, - version = "v2", - ), - file_csi_driver = self.models.ManagedClusterStorageProfileFileCSIDriver( - enabled = True, - ), - snapshot_controller = self.models.ManagedClusterStorageProfileSnapshotController( - enabled = True, - ), - ) - ) - mc = self.models.ManagedCluster( - location="test_location", - ) - ctx_9.attach_mc(mc) - self.assertEqual( - ctx_9.get_storage_profile(), storage_profile - ) - - # pass when value passed for disk_driver_version without enable_disk_driver - ctx_10 = AKSPreviewContext( - self.cmd, - { - "disk_driver_version": "v2", - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - storage_profile = ( - self.models.ManagedClusterStorageProfile( - 
disk_csi_driver = self.models.ManagedClusterStorageProfileDiskCSIDriver( - enabled = True, - version = "v2", - ), - ) - ) - mc = self.models.ManagedCluster( - location="test_location", - ) - ctx_10.attach_mc(mc) - self.assertEqual( - ctx_10.get_storage_profile(), storage_profile - ) - - - def test_get_enable_pod_security_policy(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - {"enable_pod_security_policy": False}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1.get_enable_pod_security_policy(), False) - mc = self.models.ManagedCluster( - location="test_location", - enable_pod_security_policy=True, - ) - ctx_1.attach_mc(mc) - self.assertEqual(ctx_1.get_enable_pod_security_policy(), True) - - # custom value - ctx_2 = AKSPreviewContext( - self.cmd, - { - "enable_pod_security_policy": True, - "disable_pod_security_policy": True, - }, - self.models, - decorator_mode=DecoratorMode.UPDATE, - ) - # fail on mutually exclusive enable_pod_security_policy and disable_pod_security_policy - with self.assertRaises(MutuallyExclusiveArgumentError): - ctx_2.get_enable_pod_security_policy() - - def test_get_disable_pod_security_policy(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - {"disable_pod_security_policy": False}, - self.models, - decorator_mode=DecoratorMode.UPDATE, - ) - self.assertEqual(ctx_1.get_disable_pod_security_policy(), False) - mc = self.models.ManagedCluster( - location="test_location", - enable_pod_security_policy=False, - ) - ctx_1.attach_mc(mc) - self.assertEqual(ctx_1.get_disable_pod_security_policy(), False) - - # custom value - ctx_2 = AKSPreviewContext( - self.cmd, - { - "enable_pod_security_policy": True, - "disable_pod_security_policy": True, - }, - self.models, - decorator_mode=DecoratorMode.UPDATE, - ) - # fail on mutually exclusive enable_pod_security_policy and disable_pod_security_policy - with self.assertRaises(MutuallyExclusiveArgumentError): - ctx_2.get_disable_pod_security_policy() - - 
def test_get_enable_managed_identity(self): - # custom value - ctx_1 = AKSPreviewContext( - self.cmd, - {"enable_managed_identity": False, "enable_pod_identity": True}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - # fail on enable_managed_identity not specified - with self.assertRaises(RequiredArgumentMissingError): - self.assertEqual(ctx_1.get_enable_managed_identity(), False) - - # custom value - ctx_2 = AKSPreviewContext( - self.cmd, - {"enable_pod_identity": True}, - self.models, - decorator_mode=DecoratorMode.UPDATE, - ) - mc_2 = self.models.ManagedCluster( - location="test_location", - ) - ctx_2.attach_mc(mc_2) - # fail on managed identity not enabled - with self.assertRaises(RequiredArgumentMissingError): - self.assertEqual(ctx_2.get_enable_managed_identity(), False) - - def test_get_enable_pod_identity(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - {"enable_pod_identity": False}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1.get_enable_pod_identity(), False) - pod_identity_profile = ( - self.models.pod_identity_models.ManagedClusterPodIdentityProfile( - enabled=True - ) - ) - mc = self.models.ManagedCluster( - location="test_location", - pod_identity_profile=pod_identity_profile, - ) - ctx_1.attach_mc(mc) - # fail on enable_managed_identity not specified - with self.assertRaises(RequiredArgumentMissingError): - self.assertEqual(ctx_1.get_enable_pod_identity(), True) - - # custom value - ctx_2 = AKSPreviewContext( - self.cmd, - { - "enable_managed_identity": True, - "enable_pod_identity": True, - "enable_pod_identity_with_kubenet": False, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - network_profile_2 = self.models.ContainerServiceNetworkProfile( - network_plugin="kubenet" - ) - mc_2 = self.models.ManagedCluster( - location="test_location", - network_profile=network_profile_2, - ) - ctx_2.attach_mc(mc_2) - # fail on enable_pod_identity_with_kubenet not specified - with 
self.assertRaises(RequiredArgumentMissingError): - self.assertEqual(ctx_2.get_enable_pod_identity(), True) - - # custom value - ctx_3 = AKSPreviewContext( - self.cmd, - { - "enable_pod_identity": True, - "disable_pod_identity": True, - }, - self.models, - decorator_mode=DecoratorMode.UPDATE, - ) - mc_3 = self.models.ManagedCluster( - location="test_location", - identity=self.models.ManagedClusterIdentity(type="SystemAssigned"), - ) - ctx_3.attach_mc(mc_3) - # fail on mutually exclusive enable_pod_identity and disable_pod_identity - with self.assertRaises(MutuallyExclusiveArgumentError): - ctx_3.get_enable_pod_identity() - - # custom value - ctx_4 = AKSPreviewContext( - self.cmd, - { - "enable_pod_identity": True, - }, - self.models, - decorator_mode=DecoratorMode.UPDATE, - ) - mc_4 = self.models.ManagedCluster(location="test_location") - ctx_4.attach_mc(mc_4) - # fail on managed identity not enabled - with self.assertRaises(RequiredArgumentMissingError): - ctx_4.get_enable_pod_identity() - - def test_get_disable_pod_identity(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - {"disable_pod_identity": False}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1.get_disable_pod_identity(), False) - - # custom value - ctx_2 = AKSPreviewContext( - self.cmd, - { - "enable_pod_identity": True, - "disable_pod_identity": True, - }, - self.models, - decorator_mode=DecoratorMode.UPDATE, - ) - # fail on mutually exclusive enable_pod_identity and disable_pod_identity - with self.assertRaises(MutuallyExclusiveArgumentError): - ctx_2.get_disable_pod_identity() - - def test_get_enable_pod_identity_with_kubenet(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - {"enable_pod_identity_with_kubenet": False}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1.get_enable_pod_identity_with_kubenet(), False) - pod_identity_profile = ( - self.models.pod_identity_models.ManagedClusterPodIdentityProfile( - 
enabled=True, - allow_network_plugin_kubenet=True, - ) - ) - mc = self.models.ManagedCluster( - location="test_location", - pod_identity_profile=pod_identity_profile, - ) - ctx_1.attach_mc(mc) - # fail on enable_managed_identity not specified - # with self.assertRaises(RequiredArgumentMissingError): - self.assertEqual(ctx_1.get_enable_pod_identity_with_kubenet(), True) - - # custom value - ctx_2 = AKSPreviewContext( - self.cmd, - { - "enable_managed_identity": True, - "enable_pod_identity": True, - "enable_pod_identity_with_kubenet": False, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - network_profile_2 = self.models.ContainerServiceNetworkProfile( - network_plugin="kubenet" - ) - mc_2 = self.models.ManagedCluster( - location="test_location", - network_profile=network_profile_2, - ) - ctx_2.attach_mc(mc_2) - # fail on enable_pod_identity_with_kubenet not specified - with self.assertRaises(RequiredArgumentMissingError): - self.assertEqual( - ctx_2.get_enable_pod_identity_with_kubenet(), False - ) - - def test_get_addon_consts(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - {}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - addon_consts = ctx_1.get_addon_consts() - ground_truth_addon_consts = { - "ADDONS": ADDONS, - "CONST_ACC_SGX_QUOTE_HELPER_ENABLED": CONST_ACC_SGX_QUOTE_HELPER_ENABLED, - "CONST_AZURE_POLICY_ADDON_NAME": CONST_AZURE_POLICY_ADDON_NAME, - "CONST_CONFCOM_ADDON_NAME": CONST_CONFCOM_ADDON_NAME, - "CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME": CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME, - "CONST_INGRESS_APPGW_ADDON_NAME": CONST_INGRESS_APPGW_ADDON_NAME, - "CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID": CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID, - "CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME": CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME, - "CONST_INGRESS_APPGW_SUBNET_CIDR": CONST_INGRESS_APPGW_SUBNET_CIDR, - "CONST_INGRESS_APPGW_SUBNET_ID": CONST_INGRESS_APPGW_SUBNET_ID, - "CONST_INGRESS_APPGW_WATCH_NAMESPACE": 
CONST_INGRESS_APPGW_WATCH_NAMESPACE, - "CONST_KUBE_DASHBOARD_ADDON_NAME": CONST_KUBE_DASHBOARD_ADDON_NAME, - "CONST_MONITORING_ADDON_NAME": CONST_MONITORING_ADDON_NAME, - "CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID": CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID, - "CONST_OPEN_SERVICE_MESH_ADDON_NAME": CONST_OPEN_SERVICE_MESH_ADDON_NAME, - "CONST_VIRTUAL_NODE_ADDON_NAME": CONST_VIRTUAL_NODE_ADDON_NAME, - "CONST_VIRTUAL_NODE_SUBNET_NAME": CONST_VIRTUAL_NODE_SUBNET_NAME, - "CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME": CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME, - "CONST_SECRET_ROTATION_ENABLED": CONST_SECRET_ROTATION_ENABLED, - "CONST_ROTATION_POLL_INTERVAL": CONST_ROTATION_POLL_INTERVAL, - # new addon consts in aks-preview - "CONST_GITOPS_ADDON_NAME": CONST_GITOPS_ADDON_NAME, - "CONST_MONITORING_USING_AAD_MSI_AUTH": CONST_MONITORING_USING_AAD_MSI_AUTH, - } - self.assertEqual(addon_consts, ground_truth_addon_consts) - - def test_get_appgw_subnet_prefix(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - { - "appgw_subnet_prefix": None, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1.get_appgw_subnet_prefix(), None) - addon_profiles_1 = { - CONST_INGRESS_APPGW_ADDON_NAME: self.models.ManagedClusterAddonProfile( - enabled=True, - config={ - CONST_INGRESS_APPGW_SUBNET_CIDR: "test_appgw_subnet_prefix" - }, - ) - } - mc = self.models.ManagedCluster( - location="test_location", addon_profiles=addon_profiles_1 - ) - ctx_1.attach_mc(mc) - self.assertEqual( - ctx_1.get_appgw_subnet_prefix(), "test_appgw_subnet_prefix" - ) - - def test_get_enable_msi_auth_for_monitoring(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - { - "enable_msi_auth_for_monitoring": False, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1.get_enable_msi_auth_for_monitoring(), False) - addon_profiles_1 = { - CONST_MONITORING_ADDON_NAME: self.models.ManagedClusterAddonProfile( - 
enabled=True, - config={CONST_MONITORING_USING_AAD_MSI_AUTH: True}, - ) - } - mc = self.models.ManagedCluster( - location="test_location", addon_profiles=addon_profiles_1 - ) - ctx_1.attach_mc(mc) - self.assertEqual(ctx_1.get_enable_msi_auth_for_monitoring(), True) - - def test_get_no_wait(self): - # custom value - ctx_1 = AKSPreviewContext( - self.cmd, - { - "no_wait": True, - "enable_msi_auth_for_monitoring": True, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - ctx_1.set_intermediate("monitoring", True, overwrite_exists=True) - self.assertEqual(ctx_1.get_no_wait(), False) - - def test_validate_gmsa_options(self): - # default - ctx = AKSPreviewContext( - self.cmd, - {}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - ctx._AKSPreviewContext__validate_gmsa_options(False, None, None, False) - ctx._AKSPreviewContext__validate_gmsa_options(True, None, None, True) - - # fail on yes & prompt_y_n not specified - with patch( - "azext_aks_preview.decorator.prompt_y_n", - return_value=False, - ), self.assertRaises(DecoratorEarlyExitException): - ctx._AKSPreviewContext__validate_gmsa_options( - True, None, None, False - ) - - # fail on gmsa_root_domain_name not specified - with self.assertRaises(RequiredArgumentMissingError): - ctx._AKSPreviewContext__validate_gmsa_options( - True, "test_gmsa_dns_server", None, False - ) - - # fail on enable_windows_gmsa not specified - with self.assertRaises(RequiredArgumentMissingError): - ctx._AKSPreviewContext__validate_gmsa_options( - False, None, "test_gmsa_root_domain_name", False - ) - - def test_get_enable_windows_gmsa(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - { - "enable_windows_gmsa": False, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1.get_enable_windows_gmsa(), False) - windows_gmsa_profile_1 = self.models.WindowsGmsaProfile(enabled=True) - windows_profile_1 = self.models.ManagedClusterWindowsProfile( - 
admin_username="test_admin_username", - gmsa_profile=windows_gmsa_profile_1, - ) - mc = self.models.ManagedCluster( - location="test_location", windows_profile=windows_profile_1 - ) - ctx_1.attach_mc(mc) - with patch( - "azext_aks_preview.decorator.prompt_y_n", - return_value=True, - ): - self.assertEqual(ctx_1.get_enable_windows_gmsa(), True) - - def test_get_gmsa_dns_server_and_root_domain_name(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - { - "enable_windows_gmsa": False, - "gmsa_dns_server": None, - "gmsa_root_domain_name": None, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual( - ctx_1.get_gmsa_dns_server_and_root_domain_name(), (None, None) - ) - windows_gmsa_profile_1 = self.models.WindowsGmsaProfile( - enabled=True, - dns_server="test_dns_server", - root_domain_name="test_root_domain_name", - ) - windows_profile_1 = self.models.ManagedClusterWindowsProfile( - admin_username="test_admin_username", - gmsa_profile=windows_gmsa_profile_1, - ) - mc = self.models.ManagedCluster( - location="test_location", windows_profile=windows_profile_1 - ) - ctx_1.attach_mc(mc) - self.assertEqual( - ctx_1.get_gmsa_dns_server_and_root_domain_name(), - ("test_dns_server", "test_root_domain_name"), - ) - - # custom value - ctx_2 = AKSPreviewContext( - self.cmd, - { - "enable_windows_gmsa": True, - "gmsa_dns_server": "test_gmsa_dns_server", - "gmsa_root_domain_name": "test_gmsa_root_domain_name", - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - windows_gmsa_profile_2 = self.models.WindowsGmsaProfile( - enabled=True, - dns_server="test_dns_server", - root_domain_name=None, - ) - windows_profile_2 = self.models.ManagedClusterWindowsProfile( - admin_username="test_admin_username", - gmsa_profile=windows_gmsa_profile_2, - ) - mc = self.models.ManagedCluster( - location="test_location", windows_profile=windows_profile_2 - ) - ctx_2.attach_mc(mc) - # fail on inconsistent state - with self.assertRaises(CLIInternalError): - 
ctx_2.get_gmsa_dns_server_and_root_domain_name() - - def test_get_snapshot_id(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - { - "snapshot_id": None, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1.get_snapshot_id(), None) - creation_data = self.models.CreationData( - source_resource_id="test_source_resource_id" - ) - agent_pool_profile = self.models.ManagedClusterAgentPoolProfile( - name="test_nodepool_name", creation_data=creation_data - ) - mc = self.models.ManagedCluster( - location="test_location", agent_pool_profiles=[agent_pool_profile] - ) - ctx_1.attach_mc(mc) - self.assertEqual(ctx_1.get_snapshot_id(), "test_source_resource_id") - - def test_get_snapshot(self): - # custom value - ctx_1 = AKSPreviewContext( - self.cmd, - { - "snapshot_id": "test_source_resource_id", - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - mock_snapshot = Mock() - with patch( - "azext_aks_preview.decorator._get_snapshot", - return_value=mock_snapshot, - ): - self.assertEqual(ctx_1.get_snapshot(), mock_snapshot) - # test cache - self.assertEqual(ctx_1.get_snapshot(), mock_snapshot) - - def test_get_cluster_snapshot_id(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - { - "cluster_snapshot_id": None, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1.get_cluster_snapshot_id(), None) - creation_data = self.models.CreationData( - source_resource_id="test_source_resource_id" - ) - agent_pool_profile = self.models.ManagedClusterAgentPoolProfile( - name="test_nodepool_name") - mc = self.models.ManagedCluster( - location="test_location", agent_pool_profiles=[agent_pool_profile], - creation_data=creation_data, - ) - ctx_1.attach_mc(mc) - self.assertEqual(ctx_1.get_cluster_snapshot_id(), - "test_source_resource_id") - - def test_get_cluster_snapshot(self): - # custom value - ctx_1 = AKSPreviewContext( - self.cmd, - { - "cluster_snapshot_id": "test_source_resource_id", - }, 
- self.models, - decorator_mode=DecoratorMode.CREATE, - ) - mock_snapshot = Mock() - with patch( - "azext_aks_preview.decorator._get_cluster_snapshot", - return_value=mock_snapshot, - ): - self.assertEqual(ctx_1.get_cluster_snapshot(), mock_snapshot) - # test cache - self.assertEqual(ctx_1.get_cluster_snapshot(), mock_snapshot) - - def test_get_host_group_id(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - {"host_group_id": None}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1.get_host_group_id(), None) - agent_pool_profile_1 = self.models.ManagedClusterAgentPoolProfile( - name="test_nodepool_name", host_group_id="test_mc_host_group_id" - ) - mc_1 = self.models.ManagedCluster( - location="test_location", agent_pool_profiles=[agent_pool_profile_1] - ) - ctx_1.attach_mc(mc_1) - self.assertEqual( - ctx_1.get_host_group_id(), "test_mc_host_group_id" - ) - - # custom - ctx_2 = AKSPreviewContext( - self.cmd, - {"host_group_id": "test_host_group_id"}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_2.get_host_group_id(), "test_host_group_id") - - def test_get_kubernetes_version(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - {"kubernetes_version": ""}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1.get_kubernetes_version(), "") - mc = self.models.ManagedCluster( - location="test_location", - kubernetes_version="test_mc_kubernetes_version", - ) - ctx_1.attach_mc(mc) - self.assertEqual( - ctx_1.get_kubernetes_version(), "test_mc_kubernetes_version" - ) - - # custom value - ctx_2 = AKSPreviewContext( - self.cmd, - {"kubernetes_version": "", "snapshot_id": "test_snapshot_id"}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - mock_snapshot = Mock(kubernetes_version="test_kubernetes_version") - with patch( - "azext_aks_preview.decorator._get_snapshot", - return_value=mock_snapshot, - ): - self.assertEqual( - 
ctx_2.get_kubernetes_version(), "test_kubernetes_version" - ) - - # custom value - ctx_3 = AKSPreviewContext( - self.cmd, - { - "kubernetes_version": "custom_kubernetes_version", - "snapshot_id": "test_snapshot_id", - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - mock_snapshot = Mock(kubernetes_version="test_kubernetes_version") - with patch( - "azext_aks_preview.decorator._get_snapshot", - return_value=mock_snapshot, - ): - self.assertEqual( - ctx_3.get_kubernetes_version(), "custom_kubernetes_version" - ) - - # custom value - ctx_4 = AKSPreviewContext( - self.cmd, - {"kubernetes_version": "", "cluster_snapshot_id": "test_cluster_snapshot_id"}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - mock_snapshot = Mock( - managed_cluster_properties_read_only=Mock(kubernetes_version="test_cluster_kubernetes_version")) - with patch( - "azext_aks_preview.decorator._get_cluster_snapshot", - return_value=mock_snapshot, - ): - self.assertEqual( - ctx_4.get_kubernetes_version(), "test_cluster_kubernetes_version" - ) - - # custom value - ctx_5 = AKSPreviewContext( - self.cmd, - { - "cluster_snapshot_id": "test_cluster_snapshot_id", - "snapshot_id": "test_snapshot_id", - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - mock_snapshot = Mock(kubernetes_version="test_kubernetes_version") - mock_mc_snapshot = Mock( - managed_cluster_properties_read_only=Mock(kubernetes_version="test_cluster_kubernetes_version")) - with patch( - "azext_aks_preview.decorator._get_cluster_snapshot", - return_value=mock_mc_snapshot, - ), patch( - "azext_aks_preview.decorator._get_snapshot", - return_value=mock_snapshot, - ): - self.assertEqual( - ctx_5.get_kubernetes_version(), "test_cluster_kubernetes_version" - ) - - def test_get_os_sku(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - {"os_sku": None}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1.get_os_sku(), None) - agent_pool_profile = 
self.models.ManagedClusterAgentPoolProfile( - name="test_nodepool_name", os_sku="test_mc_os_sku" - ) - mc = self.models.ManagedCluster( - location="test_location", agent_pool_profiles=[agent_pool_profile] - ) - ctx_1.attach_mc(mc) - self.assertEqual(ctx_1.get_os_sku(), "test_mc_os_sku") - - # custom value - ctx_2 = AKSPreviewContext( - self.cmd, - {"os_sku": None, "snapshot_id": "test_snapshot_id"}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - mock_snapshot = Mock(os_sku="test_os_sku") - with patch( - "azext_aks_preview.decorator._get_snapshot", - return_value=mock_snapshot, - ): - self.assertEqual(ctx_2.get_os_sku(), "test_os_sku") - - # custom value - ctx_3 = AKSPreviewContext( - self.cmd, - { - "os_sku": "custom_os_sku", - "snapshot_id": "test_snapshot_id", - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - mock_snapshot = Mock(os_sku="test_os_sku") - with patch( - "azext_aks_preview.decorator._get_snapshot", - return_value=mock_snapshot, - ): - self.assertEqual(ctx_3.get_os_sku(), "custom_os_sku") - - def test_get_node_vm_size(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - {"node_vm_size": None}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1.get_node_vm_size(), "Standard_DS2_v2") - agent_pool_profile = self.models.ManagedClusterAgentPoolProfile( - name="test_nodepool_name", vm_size="Standard_ABCD_v2" - ) - mc = self.models.ManagedCluster( - location="test_location", agent_pool_profiles=[agent_pool_profile] - ) - ctx_1.attach_mc(mc) - self.assertEqual(ctx_1.get_node_vm_size(), "Standard_ABCD_v2") - - # custom value - ctx_2 = AKSPreviewContext( - self.cmd, - {"node_vm_size": None, "snapshot_id": "test_snapshot_id"}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - mock_snapshot = Mock(vm_size="test_vm_size") - with patch( - "azext_aks_preview.decorator._get_snapshot", - return_value=mock_snapshot, - ): - self.assertEqual(ctx_2.get_node_vm_size(), "test_vm_size") - - # 
custom value - ctx_3 = AKSPreviewContext( - self.cmd, - { - "node_vm_size": "custom_node_vm_size", - "snapshot_id": "test_snapshot_id", - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - mock_snapshot = Mock(vm_size="test_vm_size") - with patch( - "azext_aks_preview.decorator._get_snapshot", - return_value=mock_snapshot, - ): - self.assertEqual(ctx_3.get_node_vm_size(), "custom_node_vm_size") - - def test_test_get_outbound_type(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - { - "outbound_type": None, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1._get_outbound_type(read_only=True), None) - self.assertEqual(ctx_1.get_outbound_type(), "loadBalancer") - network_profile_1 = self.models.ContainerServiceNetworkProfile( - outbound_type="test_outbound_type" - ) - mc = self.models.ManagedCluster( - location="test_location", network_profile=network_profile_1 - ) - ctx_1.attach_mc(mc) - self.assertEqual(ctx_1.get_outbound_type(), "test_outbound_type") - - # invalid parameter - ctx_2 = AKSPreviewContext( - self.cmd, - { - "outbound_type": CONST_OUTBOUND_TYPE_MANAGED_NAT_GATEWAY, - "load_balancer_sku": "basic", - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - # fail on invalid load_balancer_sku (basic) when outbound_type is CONST_OUTBOUND_TYPE_MANAGED_NAT_GATEWAY - with self.assertRaises(InvalidArgumentValueError): - ctx_2.get_outbound_type() - - # invalid parameter - ctx_3 = AKSPreviewContext( - self.cmd, - { - "outbound_type": CONST_OUTBOUND_TYPE_USER_ASSIGNED_NAT_GATEWAY, - "load_balancer_sku": "basic", - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - # fail on invalid load_balancer_sku (basic) when outbound_type is CONST_OUTBOUND_TYPE_USER_ASSIGNED_NAT_GATEWAY - with self.assertRaises(InvalidArgumentValueError): - ctx_3.get_outbound_type() - - # invalid parameter - ctx_4 = AKSPreviewContext( - self.cmd, - { - "outbound_type": CONST_OUTBOUND_TYPE_USER_ASSIGNED_NAT_GATEWAY, - 
"vnet_subnet_id": None, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - # fail on vnet_subnet_id not specified - with self.assertRaises(RequiredArgumentMissingError): - ctx_4.get_outbound_type() - - # invalid parameter - ctx_5 = AKSPreviewContext( - self.cmd, - { - "outbound_type": CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING, - "vnet_subnet_id": None, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - # fail on vnet_subnet_id not specified - with self.assertRaises(RequiredArgumentMissingError): - ctx_5.get_outbound_type() - - # invalid parameter - ctx_6 = AKSPreviewContext( - self.cmd, - { - "outbound_type": CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING, - "vnet_subnet_id": "test_vnet_subnet_id", - "load_balancer_managed_outbound_ip_count": 10, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - # fail on mutually exclusive outbound_type and managed_outbound_ip_count/outbound_ips/outbound_ip_prefixes of - # load balancer - with self.assertRaises(MutuallyExclusiveArgumentError): - ctx_6.get_outbound_type() - - # invalid parameter - ctx_7 = AKSPreviewContext( - self.cmd, - { - "outbound_type": CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING, - "vnet_subnet_id": "test_vnet_subnet_id", - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - load_balancer_profile = self.models.lb_models.get( - "ManagedClusterLoadBalancerProfile" - )( - outbound_ip_prefixes=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfileOutboundIPPrefixes" - )( - public_ip_prefixes=[ - self.models.lb_models.get("ResourceReference")( - id="test_public_ip_prefix" - ) - ] - ) - ) - # fail on mutually exclusive outbound_type and managed_outbound_ip_count/outbound_ips/outbound_ip_prefixes of - # load balancer - with self.assertRaises(MutuallyExclusiveArgumentError): - ctx_7.get_outbound_type( - load_balancer_profile=load_balancer_profile, - ) - - def test_get_oidc_issuer_profile__create_not_set(self): - ctx = AKSPreviewContext( - self.cmd, {}, self.models, 
decorator_mode=DecoratorMode.CREATE - ) - self.assertIsNone(ctx.get_oidc_issuer_profile()) - - def test_get_oidc_issuer_profile__create_enable(self): - ctx = AKSPreviewContext( - self.cmd, - { - "enable_oidc_issuer": True, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - profile = ctx.get_oidc_issuer_profile() - self.assertIsNotNone(profile) - self.assertTrue(profile.enabled) - - def test_get_oidc_issuer_profile__update_not_set(self): - ctx = AKSPreviewContext( - self.cmd, {}, self.models, decorator_mode=DecoratorMode.UPDATE - ) - ctx.attach_mc(self.models.ManagedCluster(location="test_location")) - self.assertIsNone(ctx.get_oidc_issuer_profile()) - - def test_get_oidc_issuer_profile__update_not_set_with_previous_profile( - self, - ): - ctx = AKSPreviewContext( - self.cmd, {}, self.models, decorator_mode=DecoratorMode.UPDATE - ) - mc = self.models.ManagedCluster(location="test_location") - mc.oidc_issuer_profile = self.models.ManagedClusterOIDCIssuerProfile( - enabled=True - ) - ctx.attach_mc(self.models.ManagedCluster(location="test_location")) - self.assertIsNone(ctx.get_oidc_issuer_profile()) - - def test_get_oidc_issuer_profile__update_enable(self): - ctx = AKSPreviewContext( - self.cmd, - { - "enable_oidc_issuer": True, - }, - self.models, - decorator_mode=DecoratorMode.UPDATE, - ) - ctx.attach_mc(self.models.ManagedCluster(location="test_location")) - profile = ctx.get_oidc_issuer_profile() - self.assertIsNotNone(profile) - self.assertTrue(profile.enabled) - - def test_get_workload_identity_profile__create_no_set(self): - ctx = AKSPreviewContext( - self.cmd, {}, self.models, decorator_mode=DecoratorMode.CREATE - ) - self.assertIsNone(ctx.get_workload_identity_profile()) - - def test_get_workload_identity_profile__create_enable_without_oidc_issuer(self): - ctx = AKSPreviewContext( - self.cmd, - { - "enable_workload_identity": True, - }, - self.models, decorator_mode=DecoratorMode.CREATE - ) - with 
self.assertRaises(RequiredArgumentMissingError): - ctx.get_workload_identity_profile() - - def test_get_workload_identity_profile__create_enable_with_oidc_issuer(self): - ctx = AKSPreviewContext( - self.cmd, - { - "enable_oidc_issuer": True, - "enable_workload_identity": True, - }, - self.models, decorator_mode=DecoratorMode.CREATE - ) - profile = ctx.get_workload_identity_profile() - self.assertTrue(profile.enabled) - - def test_get_workload_identity_profile__update_not_set(self): - ctx = AKSPreviewContext( - self.cmd, {}, self.models, decorator_mode=DecoratorMode.UPDATE - ) - ctx.attach_mc(self.models.ManagedCluster(location="test_location")) - self.assertIsNone(ctx.get_workload_identity_profile()) - - def test_get_workload_identity_profile__update_with_enable_without_oidc_issuer(self): - ctx = AKSPreviewContext( - self.cmd, - { - "enable_workload_identity": True, - }, - self.models, decorator_mode=DecoratorMode.UPDATE - ) - ctx.attach_mc(self.models.ManagedCluster(location="test_location")) - with self.assertRaises(RequiredArgumentMissingError): - ctx.get_workload_identity_profile() - - def test_get_workload_identity_profile__update_with_enable(self): - for previous_enablement_status in [ - None, # preivous not set - True, # previous set to enabled=true - False, # previous set to enabled=false - ]: - ctx = AKSPreviewContext( - self.cmd, - { - "enable_workload_identity": True, - }, - self.models, decorator_mode=DecoratorMode.UPDATE - ) - mc = self.models.ManagedCluster(location="test_location") - mc.oidc_issuer_profile = self.models.ManagedClusterOIDCIssuerProfile(enabled=True) - if previous_enablement_status is None: - mc.security_profile = None - else: - mc.security_profile = self.models.ManagedClusterSecurityProfile( - workload_identity=self.models.ManagedClusterSecurityProfileWorkloadIdentity( - enabled=previous_enablement_status - ) - ) - ctx.attach_mc(mc) - profile = ctx.get_workload_identity_profile() - self.assertTrue(profile.enabled) - - def 
test_get_workload_identity_profile__update_with_disable(self): - for previous_enablement_status in [ - None, # preivous not set - True, # previous set to enabled=true - False, # previous set to enabled=false - ]: - ctx = AKSPreviewContext( - self.cmd, - { - "enable_workload_identity": False, - }, - self.models, decorator_mode=DecoratorMode.UPDATE - ) - mc = self.models.ManagedCluster(location="test_location") - mc.oidc_issuer_profile = self.models.ManagedClusterOIDCIssuerProfile(enabled=True) - if previous_enablement_status is None: - mc.security_profile = None - else: - mc.security_profile = self.models.ManagedClusterSecurityProfile( - workload_identity=self.models.ManagedClusterSecurityProfileWorkloadIdentity( - enabled=previous_enablement_status - ) - ) - ctx.attach_mc(mc) - profile = ctx.get_workload_identity_profile() - self.assertFalse(profile.enabled) - - def test_get_crg_id(self): - # default - ctx_1 = AKSPreviewContext( - self.cmd, - {"crg_id": "test_crg_id"}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1.get_crg_id(), "test_crg_id") - - ctx_2 = AKSPreviewContext( - self.cmd, - {"crg_id": ""}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_2.get_crg_id(), "") - - ctx_3 = AKSPreviewContext( - self.cmd, - {"crg_id": None}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_3.get_crg_id(), None) - - def test_get_enable_azure_keyvault_kms(self): - ctx_0 = AKSPreviewContext( - self.cmd, - {}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertIsNone(ctx_0.get_enable_azure_keyvault_kms()) - - ctx_1 = AKSPreviewContext( - self.cmd, - { - "enable_azure_keyvault_kms": False, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1.get_enable_azure_keyvault_kms(), False) - - key_id_1 = "https://fakekeyvault.vault.azure.net/secrets/fakekeyname/fakekeyversion" - ctx_2 = AKSPreviewContext( - self.cmd, - { - 
"enable_azure_keyvault_kms": False, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - security_profile = self.models.ManagedClusterSecurityProfile() - security_profile.azure_key_vault_kms = self.models.AzureKeyVaultKms( - enabled=True, - key_id=key_id_1, - ) - mc = self.models.ManagedCluster( - location="test_location", - security_profile=security_profile, - ) - ctx_2.attach_mc(mc) - self.assertEqual(ctx_2.get_enable_azure_keyvault_kms(), True) - - ctx_3 = AKSPreviewContext( - self.cmd, - { - "enable_azure_keyvault_kms": False, - }, - self.models, - decorator_mode=DecoratorMode.UPDATE, - ) - security_profile = self.models.ManagedClusterSecurityProfile() - security_profile.azure_key_vault_kms = self.models.AzureKeyVaultKms( - enabled=True, - key_id=key_id_1, - ) - mc = self.models.ManagedCluster( - location="test_location", - security_profile=security_profile, - ) - ctx_3.attach_mc(mc) - self.assertEqual(ctx_3.get_enable_azure_keyvault_kms(), False) - - ctx_4 = AKSPreviewContext( - self.cmd, - { - "enable_azure_keyvault_kms": True, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - with self.assertRaises(RequiredArgumentMissingError): - ctx_4.get_enable_azure_keyvault_kms() - - ctx_5 = AKSPreviewContext( - self.cmd, - { - "azure_keyvault_kms_key_id": "test_azure_keyvault_kms_key_id", - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - with self.assertRaises(RequiredArgumentMissingError): - ctx_5.get_enable_azure_keyvault_kms() - - def test_get_azure_keyvault_kms_key_id(self): - ctx_0 = AKSPreviewContext( - self.cmd, - {}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertIsNone(ctx_0.get_azure_keyvault_kms_key_id()) - - key_id_1 = "https://fakekeyvault.vault.azure.net/secrets/fakekeyname/fakekeyversion" - ctx_1 = AKSPreviewContext( - self.cmd, - { - "enable_azure_keyvault_kms": True, - "azure_keyvault_kms_key_id": key_id_1, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - 
self.assertEqual(ctx_1.get_azure_keyvault_kms_key_id(), key_id_1) - - ctx_2 = AKSPreviewContext( - self.cmd, - { - "enable_azure_keyvault_kms": True, - "azure_keyvault_kms_key_id": key_id_1, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - key_id_2 = "https://fakekeyvault2.vault.azure.net/secrets/fakekeyname2/fakekeyversion2" - security_profile = self.models.ManagedClusterSecurityProfile() - security_profile.azure_key_vault_kms = self.models.AzureKeyVaultKms( - enabled=True, - key_id=key_id_2, - ) - mc = self.models.ManagedCluster( - location="test_location", - security_profile=security_profile, - ) - ctx_2.attach_mc(mc) - self.assertEqual(ctx_2.get_azure_keyvault_kms_key_id(), key_id_2) - - ctx_3 = AKSPreviewContext( - self.cmd, - { - "enable_azure_keyvault_kms": True, - "azure_keyvault_kms_key_id": key_id_1, - }, - self.models, - decorator_mode=DecoratorMode.UPDATE, - ) - security_profile = self.models.ManagedClusterSecurityProfile() - security_profile.azure_key_vault_kms = self.models.AzureKeyVaultKms( - enabled=True, - key_id=key_id_2, - ) - mc = self.models.ManagedCluster( - location="test_location", - security_profile=security_profile, - ) - ctx_3.attach_mc(mc) - self.assertEqual(ctx_3.get_azure_keyvault_kms_key_id(), key_id_1) - - ctx_4 = AKSPreviewContext( - self.cmd, - { - "azure_keyvault_kms_key_id": key_id_1, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - with self.assertRaises(RequiredArgumentMissingError): - ctx_4.get_azure_keyvault_kms_key_id() - - ctx_5 = AKSPreviewContext( - self.cmd, - { - "enable_azure_keyvault_kms": False, - "azure_keyvault_kms_key_id": key_id_1, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - with self.assertRaises(RequiredArgumentMissingError): - ctx_5.get_azure_keyvault_kms_key_id() - - def test_get_azure_keyvault_kms_key_vault_network_access(self): - key_vault_network_access_1 = "Public" - key_vault_network_access_2 = "Private" - - ctx_0 = AKSPreviewContext( - self.cmd, - {}, 
- self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertIsNone(ctx_0.get_azure_keyvault_kms_key_vault_network_access()) - - ctx_1 = AKSPreviewContext( - self.cmd, - { - "enable_azure_keyvault_kms": True, - "azure_keyvault_kms_key_vault_network_access": key_vault_network_access_1, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1.get_azure_keyvault_kms_key_vault_network_access(), key_vault_network_access_1) - - ctx_2 = AKSPreviewContext( - self.cmd, - { - "enable_azure_keyvault_kms": True, - "azure_keyvault_kms_key_vault_network_access": key_vault_network_access_2, - }, - self.models, - decorator_mode=DecoratorMode.UPDATE, - ) - security_profile = self.models.ManagedClusterSecurityProfile() - security_profile.azure_key_vault_kms = self.models.AzureKeyVaultKms( - enabled=True, - key_vault_network_access=key_vault_network_access_1, - ) - mc = self.models.ManagedCluster( - location="test_location", - security_profile=security_profile, - ) - ctx_2.attach_mc(mc) - self.assertEqual(ctx_2.get_azure_keyvault_kms_key_vault_network_access(), key_vault_network_access_2) - - ctx_3 = AKSPreviewContext( - self.cmd, - { - "azure_keyvault_kms_key_vault_network_access": key_vault_network_access_2, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - with self.assertRaises(RequiredArgumentMissingError): - ctx_3.get_azure_keyvault_kms_key_vault_network_access() - - ctx_4 = AKSPreviewContext( - self.cmd, - { - "enable_azure_keyvault_kms": False, - "azure_keyvault_kms_key_vault_network_access": key_vault_network_access_2, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - with self.assertRaises(RequiredArgumentMissingError): - ctx_4.get_azure_keyvault_kms_key_vault_network_access() - - def test_get_azure_keyvault_kms_key_vault_resource_id(self): - key_vault_resource_id_1 = "/subscriptions/8ecadfc9-d1a3-4ea4-b844-0d9f87e4d7c8/resourceGroups/foo/providers/Microsoft.KeyVault/vaults/foo" - key_vault_resource_id_2 
= "/subscriptions/8ecadfc9-d1a3-4ea4-b844-0d9f87e4d7c8/resourceGroups/bar/providers/Microsoft.KeyVault/vaults/bar" - - ctx_0 = AKSPreviewContext( - self.cmd, - {}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertIsNone(ctx_0.get_azure_keyvault_kms_key_vault_resource_id()) - - ctx_1 = AKSPreviewContext( - self.cmd, - { - "enable_azure_keyvault_kms": True, - "azure_keyvault_kms_key_vault_network_access": "Public", - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1.get_azure_keyvault_kms_key_vault_resource_id(), None) - - ctx_2 = AKSPreviewContext( - self.cmd, - { - "enable_azure_keyvault_kms": True, - "azure_keyvault_kms_key_vault_network_access": "Public", - "azure_keyvault_kms_key_vault_resource_id": "", - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_2.get_azure_keyvault_kms_key_vault_resource_id(), "") - - ctx_3 = AKSPreviewContext( - self.cmd, - { - "enable_azure_keyvault_kms": True, - "azure_keyvault_kms_key_vault_network_access": "Private", - "azure_keyvault_kms_key_vault_resource_id": key_vault_resource_id_1, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_3.get_azure_keyvault_kms_key_vault_resource_id(), key_vault_resource_id_1) - - ctx_4 = AKSPreviewContext( - self.cmd, - { - "enable_azure_keyvault_kms": True, - "azure_keyvault_kms_key_vault_network_access": "Private", - "azure_keyvault_kms_key_vault_resource_id": key_vault_resource_id_1, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - security_profile = self.models.ManagedClusterSecurityProfile() - security_profile.azure_key_vault_kms = self.models.AzureKeyVaultKms( - enabled=True, - key_vault_network_access="Private", - key_vault_resource_id=key_vault_resource_id_2, - ) - mc = self.models.ManagedCluster( - location="test_location", - security_profile=security_profile, - ) - ctx_4.attach_mc(mc) - 
self.assertEqual(ctx_4.get_azure_keyvault_kms_key_vault_resource_id(), key_vault_resource_id_2) - - ctx_5 = AKSPreviewContext( - self.cmd, - { - "enable_azure_keyvault_kms": True, - "azure_keyvault_kms_key_vault_network_access": "Private", - "azure_keyvault_kms_key_vault_resource_id": key_vault_resource_id_2, - }, - self.models, - decorator_mode=DecoratorMode.UPDATE, - ) - security_profile = self.models.ManagedClusterSecurityProfile() - security_profile.azure_key_vault_kms = self.models.AzureKeyVaultKms( - enabled=True, - key_vault_network_access="Private", - key_vault_resource_id=key_vault_resource_id_1, - ) - mc = self.models.ManagedCluster( - location="test_location", - security_profile=security_profile, - ) - ctx_5.attach_mc(mc) - self.assertEqual(ctx_5.get_azure_keyvault_kms_key_vault_resource_id(), key_vault_resource_id_2) - - ctx_6 = AKSPreviewContext( - self.cmd, - { - "azure_keyvault_kms_key_vault_resource_id": key_vault_resource_id_1, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - with self.assertRaises(RequiredArgumentMissingError): - ctx_6.get_azure_keyvault_kms_key_vault_resource_id() - - ctx_7 = AKSPreviewContext( - self.cmd, - { - "enable_azure_keyvault_kms": False, - "azure_keyvault_kms_key_vault_resource_id": key_vault_resource_id_1, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - with self.assertRaises(RequiredArgumentMissingError): - ctx_7.get_azure_keyvault_kms_key_vault_resource_id() - - ctx_8 = AKSPreviewContext( - self.cmd, - { - "enable_azure_keyvault_kms": True, - "azure_keyvault_kms_key_vault_network_access": "Public", - "azure_keyvault_kms_key_vault_resource_id": key_vault_resource_id_1, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - with self.assertRaises(ArgumentUsageError): - ctx_8.get_azure_keyvault_kms_key_vault_resource_id() - - ctx_9 = AKSPreviewContext( - self.cmd, - { - "enable_azure_keyvault_kms": True, - "azure_keyvault_kms_key_vault_network_access": "Private", - 
"azure_keyvault_kms_key_vault_resource_id": "", - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - with self.assertRaises(ArgumentUsageError): - ctx_9.get_azure_keyvault_kms_key_vault_resource_id() - - def test_get_updated_assign_kubelet_identity(self): - ctx_0 = AKSPreviewContext( - self.cmd, - {}, - self.models, - decorator_mode=DecoratorMode.UPDATE, - ) - self.assertEqual(ctx_0.get_updated_assign_kubelet_identity(), "") - - ctx_1 = AKSPreviewContext( - self.cmd, - { - "assign_kubelet_identity": "fakeresourceid", - "yes": True, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1.get_updated_assign_kubelet_identity(), "fakeresourceid") - - def test_get_enable_apiserver_vnet_integration(self): - ctx_0 = AKSPreviewContext( - self.cmd, - {}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertIsNone(ctx_0.get_enable_apiserver_vnet_integration()) - - ctx_1 = AKSPreviewContext( - self.cmd, - { - "enable_apiserver_vnet_integration": False, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1.get_enable_apiserver_vnet_integration(), False) - - ctx_2 = AKSPreviewContext( - self.cmd, - { - "enable_apiserver_vnet_integration": False, - "enable_private_cluster": False, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - api_server_access_profile = self.models.ManagedClusterAPIServerAccessProfile() - api_server_access_profile.enable_vnet_integration = True - api_server_access_profile.enable_private_cluster = True - mc = self.models.ManagedCluster( - location="test_location", - api_server_access_profile=api_server_access_profile, - ) - ctx_2.attach_mc(mc) - self.assertEqual(ctx_2.get_enable_apiserver_vnet_integration(), True) - - ctx_3 = AKSPreviewContext( - self.cmd, - { - "enable_apiserver_vnet_integration": True, - "enable_private_cluster": True, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - 
self.assertEqual(ctx_3.get_enable_apiserver_vnet_integration(), True) - - ctx_4 = AKSPreviewContext( - self.cmd, - { - "enable_apiserver_vnet_integration": True, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - with self.assertRaises(RequiredArgumentMissingError): - ctx_4.get_enable_apiserver_vnet_integration() - - ctx_5 = AKSPreviewContext( - self.cmd, - { - "enable_apiserver_vnet_integration": True, - }, - self.models, - decorator_mode=DecoratorMode.UPDATE, - ) - with self.assertRaises(RequiredArgumentMissingError): - ctx_5.get_enable_apiserver_vnet_integration() - - def test_get_apiserver_subnet_id(self): - ctx_0 = AKSPreviewContext( - self.cmd, - {}, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertIsNone(ctx_0.get_apiserver_subnet_id()) - - apiserver_subnet_id = "/subscriptions/fakesub/resourceGroups/fakerg/providers/Microsoft.Network/virtualNetworks/fakevnet/subnets/apiserver" - vnet_subnet_id = "/subscriptions/fakesub/resourceGroups/fakerg/providers/Microsoft.Network/virtualNetworks/fakevnet/subnets/node" - ctx_1 = AKSPreviewContext( - self.cmd, - { - "enable_apiserver_vnet_integration": True, - "enable_private_cluster": True, - "apiserver_subnet_id": apiserver_subnet_id, - "vnet_subnet_id": vnet_subnet_id, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - self.assertEqual(ctx_1.get_apiserver_subnet_id(), apiserver_subnet_id) - - ctx_2 = AKSPreviewContext( - self.cmd, - { - "enable_apiserver_vnet_integration": True, - "enable_private_cluster": True, - "vnet_subnet_id": vnet_subnet_id - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - api_server_access_profile = self.models.ManagedClusterAPIServerAccessProfile() - api_server_access_profile.subnet_id = apiserver_subnet_id - mc = self.models.ManagedCluster( - location="test_location", - api_server_access_profile=api_server_access_profile, - ) - ctx_2.attach_mc(mc) - self.assertEqual(ctx_2.get_apiserver_subnet_id(), apiserver_subnet_id) - - ctx_3 
= AKSPreviewContext( - self.cmd, - { - "enable_apiserver_vnet_integration": True, - "apiserver_subnet_id": apiserver_subnet_id, - }, - self.models, - decorator_mode=DecoratorMode.UPDATE, - ) - self.assertEqual(ctx_3.get_apiserver_subnet_id(), apiserver_subnet_id) - - ctx_4 = AKSPreviewContext( - self.cmd, - { - "enable_private_cluster": True, - "apiserver_subnet_id": apiserver_subnet_id, - "vnet_subnet_id": vnet_subnet_id, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - with self.assertRaises(RequiredArgumentMissingError): - ctx_4.get_apiserver_subnet_id() - - ctx_5 = AKSPreviewContext( - self.cmd, - { - "enable_apiserver_vnet_integration": False, - "apiserver_subnet_id": apiserver_subnet_id, - "vnet_subnet_id": vnet_subnet_id, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - with self.assertRaises(RequiredArgumentMissingError): - ctx_5.get_apiserver_subnet_id() - - ctx_6 = AKSPreviewContext( - self.cmd, - { - "apiserver_subnet_id": apiserver_subnet_id, - }, - self.models, - decorator_mode=DecoratorMode.CREATE, - ) - with self.assertRaises(RequiredArgumentMissingError): - ctx_6.get_apiserver_subnet_id() - - def test_get_enable_keda(self): - # Returns the value of enable_keda if keda is None in existing profile. 
- ctx = AKSPreviewContext(self.cmd, {}, self.models, decorator_mode=DecoratorMode.CREATE) - self.assertIsNone(ctx.get_enable_keda()) - - ctx = AKSPreviewContext(self.cmd, {"enable_keda": False}, self.models, decorator_mode=DecoratorMode.CREATE) - self.assertFalse(ctx.get_enable_keda()) - - ctx = AKSPreviewContext(self.cmd, {"enable_keda": True}, self.models, decorator_mode=DecoratorMode.CREATE) - self.assertTrue(ctx.get_enable_keda()) - - keda_none_mc = self.models.ManagedCluster( - location="test_location", - workload_auto_scaler_profile=self.models.ManagedClusterWorkloadAutoScalerProfile()) - - keda_false_mc = self.models.ManagedCluster( - location="test_location", - workload_auto_scaler_profile=self.models.ManagedClusterWorkloadAutoScalerProfile( - keda=self.models.ManagedClusterWorkloadAutoScalerProfileKeda(enabled=False))) - - keda_true_mc = self.models.ManagedCluster( - location="test_location", - workload_auto_scaler_profile=self.models.ManagedClusterWorkloadAutoScalerProfile( - keda=self.models.ManagedClusterWorkloadAutoScalerProfileKeda(enabled=True))) - - # Returns the value of keda in existing profile if enable_keda is None. - ctx = AKSPreviewContext(self.cmd, {}, self.models, decorator_mode=DecoratorMode.CREATE) - ctx.attach_mc(keda_none_mc) - self.assertIsNone(ctx.get_enable_keda()) - - ctx = AKSPreviewContext(self.cmd, {}, self.models, decorator_mode=DecoratorMode.CREATE) - ctx.attach_mc(keda_false_mc) - self.assertFalse(ctx.get_enable_keda()) - - ctx = AKSPreviewContext(self.cmd, {}, self.models, decorator_mode=DecoratorMode.CREATE) - ctx.attach_mc(keda_true_mc) - self.assertTrue(ctx.get_enable_keda()) - - # Ignores the value of keda in existing profile in update-mode. - ctx = AKSPreviewContext(self.cmd, {}, self.models, decorator_mode=DecoratorMode.UPDATE) - ctx.attach_mc(keda_true_mc) - self.assertIsNone(ctx.get_enable_keda()) - - # Throws exception when both enable_keda and disable_keda are True. 
- ctx = AKSPreviewContext(self.cmd, {"enable_keda": True, "disable_keda": True}, self.models, decorator_mode=DecoratorMode.CREATE) - with self.assertRaises(MutuallyExclusiveArgumentError): - ctx.get_enable_keda() - - # Throws exception when disable_keda and the value of keda in existing profile are True. - ctx = AKSPreviewContext(self.cmd, {"disable_keda": True}, self.models, decorator_mode=DecoratorMode.CREATE) - ctx.attach_mc(keda_true_mc) - with self.assertRaises(MutuallyExclusiveArgumentError): - ctx.get_enable_keda() - - def test_get_disable_keda(self): - # Returns the value of disable_keda. - ctx = AKSPreviewContext(self.cmd, {}, self.models, decorator_mode=DecoratorMode.CREATE) - self.assertIsNone(ctx.get_disable_keda()) - - ctx = AKSPreviewContext(self.cmd, {"disable_keda": False}, self.models, decorator_mode=DecoratorMode.CREATE) - self.assertFalse(ctx.get_disable_keda()) - - ctx = AKSPreviewContext(self.cmd, {"disable_keda": True}, self.models, decorator_mode=DecoratorMode.CREATE) - self.assertTrue(ctx.get_disable_keda()) - - # Throws exception when both enable_keda and disable_keda are True. - ctx = AKSPreviewContext(self.cmd, {"enable_keda": True, "disable_keda": True}, self.models, decorator_mode=DecoratorMode.CREATE) - with self.assertRaises(MutuallyExclusiveArgumentError): - ctx.get_disable_keda() - - keda_true_mc = self.models.ManagedCluster( - location="test_location", - workload_auto_scaler_profile=self.models.ManagedClusterWorkloadAutoScalerProfile( - keda=self.models.ManagedClusterWorkloadAutoScalerProfileKeda(enabled=True))) - - # Throws exception when disable_keda and the value of keda in existing profile are True. 
- ctx = AKSPreviewContext(self.cmd, {"disable_keda": True}, self.models, decorator_mode=DecoratorMode.CREATE) - ctx.attach_mc(keda_true_mc) - with self.assertRaises(MutuallyExclusiveArgumentError): - ctx.get_disable_keda() - - -class AKSPreviewCreateDecoratorTestCase(unittest.TestCase): - def setUp(self): - # manually register CUSTOM_MGMT_AKS_PREVIEW - register_aks_preview_resource_type() - self.cli_ctx = MockCLI() - self.cmd = MockCmd(self.cli_ctx) - self.models = AKSPreviewModels(self.cmd, CUSTOM_MGMT_AKS_PREVIEW) - self.client = MockClient() - - def test_set_up_agent_pool_profiles(self): - # default value in `aks_create` - dec_1 = AKSPreviewCreateDecorator( - self.cmd, - self.client, - { - "nodepool_name": "nodepool1", - "nodepool_tags": None, - "nodepool_labels": None, - "node_count": 3, - "node_vm_size": "Standard_DS2_v2", - "os_sku": None, - "vnet_subnet_id": None, - "pod_subnet_id": None, - "ppg": None, - "zones": None, - "enable_node_public_ip": False, - "enable_fips_image": False, - "node_public_ip_prefix_id": None, - "enable_encryption_at_host": False, - "enable_ultra_ssd": False, - "max_pods": 0, - "node_osdisk_size": 0, - "node_osdisk_type": None, - "enable_cluster_autoscaler": False, - "min_count": None, - "max_count": None, - "workload_runtime": None, - "gpu_instance_profile": None, - "kubelet_config": None, - "snapshot_id": None, - "host_group_id": None, - "crg_id": None, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_1 = self.models.ManagedCluster(location="test_location") - # fail on passing the wrong mc object - with self.assertRaises(CLIInternalError): - dec_1.set_up_agent_pool_profiles(None) - dec_mc_1 = dec_1.set_up_agent_pool_profiles(mc_1) - agent_pool_profile_1 = self.models.ManagedClusterAgentPoolProfile( - # Must be 12 chars or less before ACS RP adds to it - name="nodepool1", - tags=None, - node_labels=None, - count=3, - vm_size="Standard_DS2_v2", - os_type="Linux", - os_sku=None, - vnet_subnet_id=None, - pod_subnet_id=None, - 
proximity_placement_group_id=None, - availability_zones=None, - enable_node_public_ip=False, - enable_fips=False, - node_public_ip_prefix_id=None, - enable_encryption_at_host=False, - enable_ultra_ssd=False, - max_pods=None, - type="VirtualMachineScaleSets", - mode="System", - os_disk_size_gb=None, - os_disk_type=None, - enable_auto_scaling=False, - min_count=None, - max_count=None, - workload_runtime=None, - gpu_instance_profile=None, - kubelet_config=None, - creation_data=None, - host_group_id=None, - capacity_reservation_group_id=None, - ) - ground_truth_mc_1 = self.models.ManagedCluster( - location="test_location") - ground_truth_mc_1.agent_pool_profiles = [agent_pool_profile_1] - self.assertEqual(dec_mc_1, ground_truth_mc_1) - - # custom value - dec_2 = AKSPreviewCreateDecorator( - self.cmd, - self.client, - { - "nodepool_name": "test_np_name1234", - "nodepool_tags": {"k1": "v1"}, - "nodepool_labels": {"k1": "v1", "k2": "v2"}, - "node_count": 10, - "node_vm_size": "Standard_DSx_vy", - "os_sku": None, - "vnet_subnet_id": "test_vnet_subnet_id", - "pod_subnet_id": "test_pod_subnet_id", - "ppg": "test_ppg_id", - "zones": ["tz1", "tz2"], - "enable_node_public_ip": True, - "enable_fips_image": True, - "node_public_ip_prefix_id": "test_node_public_ip_prefix_id", - "enable_encryption_at_host": True, - "enable_ultra_ssd": True, - "max_pods": 50, - "node_osdisk_size": 100, - "node_osdisk_type": "test_os_disk_type", - "enable_cluster_autoscaler": True, - "min_count": 5, - "max_count": 20, - "workload_runtime": "test_workload_runtime", - "gpu_instance_profile": "test_gpu_instance_profile", - "kubelet_config": _get_test_data_file("kubeletconfig.json"), - "linux_os_config": _get_test_data_file("linuxosconfig.json"), - "snapshot_id": "test_snapshot_id", - "host_group_id": "test_host_group_id", - "crg_id": "test_crg_id", - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_2 = self.models.ManagedCluster(location="test_location") - mock_snapshot = Mock( - kubernetes_version="", - 
os_sku="snapshot_os_sku", - vm_size="snapshot_vm_size", - ) - with patch( - "azext_aks_preview.decorator._get_snapshot", - return_value=mock_snapshot, - ): - dec_mc_2 = dec_2.set_up_agent_pool_profiles(mc_2) - agent_pool_profile_2 = self.models.ManagedClusterAgentPoolProfile( - # Must be 12 chars or less before ACS RP adds to it - name="test_np_name", - tags={"k1": "v1"}, - node_labels={"k1": "v1", "k2": "v2"}, - count=10, - vm_size="Standard_DSx_vy", - os_type="Linux", - os_sku="snapshot_os_sku", - vnet_subnet_id="test_vnet_subnet_id", - pod_subnet_id="test_pod_subnet_id", - proximity_placement_group_id="test_ppg_id", - availability_zones=["tz1", "tz2"], - enable_node_public_ip=True, - enable_fips=True, - node_public_ip_prefix_id="test_node_public_ip_prefix_id", - enable_encryption_at_host=True, - enable_ultra_ssd=True, - max_pods=50, - type="VirtualMachineScaleSets", - mode="System", - os_disk_size_gb=100, - os_disk_type="test_os_disk_type", - enable_auto_scaling=True, - min_count=5, - max_count=20, - workload_runtime="test_workload_runtime", - gpu_instance_profile="test_gpu_instance_profile", - kubelet_config={ - "cpuManagerPolicy": "static", - "cpuCfsQuota": True, - "cpuCfsQuotaPeriod": "200ms", - "imageGcHighThreshold": 90, - "imageGcLowThreshold": 70, - "topologyManagerPolicy": "best-effort", - "allowedUnsafeSysctls": ["kernel.msg*", "net.*"], - "failSwapOn": False, - "containerLogMaxFiles": 10, - "podMaxPids": 120, - "containerLogMaxSizeMB": 20, - }, - linux_os_config={ - "transparentHugePageEnabled": "madvise", - "transparentHugePageDefrag": "defer+madvise", - "swapFileSizeMB": 1500, - "sysctls": { - "netCoreSomaxconn": 163849, - "netIpv4TcpTwReuse": True, - "netIpv4IpLocalPortRange": "32000 60000", - }, - }, - creation_data=self.models.CreationData( - source_resource_id="test_snapshot_id" - ), - capacity_reservation_group_id="test_crg_id", - host_group_id="test_host_group_id", - ) - ground_truth_mc_2 = self.models.ManagedCluster( - 
location="test_location") - ground_truth_mc_2.agent_pool_profiles = [agent_pool_profile_2] - self.assertEqual(dec_mc_2, ground_truth_mc_2) - - def test_set_up_http_proxy_config(self): - # default value in `aks_create` - dec_1 = AKSPreviewCreateDecorator( - self.cmd, - self.client, - { - "http_proxy_config": None, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_1 = self.models.ManagedCluster(location="test_location") - # fail on passing the wrong mc object - with self.assertRaises(CLIInternalError): - dec_1.set_up_http_proxy_config(None) - dec_mc_1 = dec_1.set_up_http_proxy_config(mc_1) - ground_truth_mc_1 = self.models.ManagedCluster( - location="test_location") - self.assertEqual(dec_mc_1, ground_truth_mc_1) - - # custom value - dec_2 = AKSPreviewCreateDecorator( - self.cmd, - self.client, - {"http_proxy_config": _get_test_data_file("httpproxyconfig.json")}, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_2 = self.models.ManagedCluster(location="test_location") - dec_mc_2 = dec_2.set_up_http_proxy_config(mc_2) - ground_truth_mc_2 = self.models.ManagedCluster( - location="test_location", - http_proxy_config={ - "httpProxy": "http://cli-proxy-vm:3128/", - "httpsProxy": "https://cli-proxy-vm:3129/", - "noProxy": ["localhost", "127.0.0.1"], - "trustedCa": 
"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZHekNDQXdPZ0F3SUJBZ0lVT1FvajhDTFpkc2Vscjk3cnZJd3g1T0xEc3V3d0RRWUpLb1pJaHZjTkFRRUwKQlFBd0Z6RVZNQk1HQTFVRUF3d01ZMnhwTFhCeWIzaDVMWFp0TUI0WERUSXlNRE13T0RFMk5EUTBOMW9YRFRNeQpNRE13TlRFMk5EUTBOMW93RnpFVk1CTUdBMVVFQXd3TVkyeHBMWEJ5YjNoNUxYWnRNSUlDSWpBTkJna3Foa2lHCjl3MEJBUUVGQUFPQ0FnOEFNSUlDQ2dLQ0FnRUEvTVB0VjVCVFB0NmNxaTRSZE1sbXIzeUlzYTJ1anpjaHh2NGgKanNDMUR0blJnb3M1UzQxUEgwcmkrM3RUU1ZYMzJ5cndzWStyRDFZUnVwbTZsbUU3R2hVNUkwR2k5b3prU0YwWgpLS2FKaTJveXBVL0ZCK1FQcXpvQ1JzTUV3R0NibUtGVmw4VnVoeW5kWEs0YjRrYmxyOWJsL2V1d2Q3TThTYnZ6CldVam5lRHJRc2lJc3J6UFQ0S0FaTHFjdHpEZTRsbFBUN1lLYTMzaGlFUE9mdldpWitkcWthUUE5UDY0eFhTeW4KZkhYOHVWQUozdUJWSmVHeEQwcGtOSjdqT3J5YVV1SEh1Y1U4UzltSWpuS2pBQjVhUGpMSDV4QXM2bG1iMzEyMgp5KzF0bkVBbVhNNTBEK1VvRWpmUzZIT2I1cmRpcVhHdmMxS2JvS2p6a1BDUnh4MmE3MmN2ZWdVajZtZ0FKTHpnClRoRTFsbGNtVTRpemd4b0lNa1ZwR1RWT0xMbjFWRkt1TmhNWkN2RnZLZ25Lb0F2M0cwRlVuZldFYVJSalNObUQKTFlhTURUNUg5WnQycERJVWpVR1N0Q2w3Z1J6TUVuWXdKTzN5aURwZzQzbzVkUnlzVXlMOUpmRS9OaDdUZzYxOApuOGNKL1c3K1FZYllsanVyYXA4cjdRRlNyb2wzVkNoRkIrT29yNW5pK3ZvaFNBd0pmMFVsTXBHM3hXbXkxVUk0ClRGS2ZGR1JSVHpyUCs3Yk53WDVoSXZJeTVWdGd5YU9xSndUeGhpL0pkeHRPcjJ0QTVyQ1c3K0N0Z1N2emtxTkUKWHlyN3ZrWWdwNlk1TFpneTR0VWpLMEswT1VnVmRqQk9oRHBFenkvRkY4dzFGRVZnSjBxWS9yV2NMa0JIRFQ4Ugp2SmtoaW84Q0F3RUFBYU5mTUYwd0Z3WURWUjBSQkJBd0RvSU1ZMnhwTFhCeWIzaDVMWFp0TUJJR0ExVWRFd0VCCi93UUlNQVlCQWY4Q0FRQXdEd1lEVlIwUEFRSC9CQVVEQXdmbmdEQWRCZ05WSFNVRUZqQVVCZ2dyQmdFRkJRY0QKQWdZSUt3WUJCUVVIQXdFd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dJQkFBb21qQ3lYdmFRT3hnWUs1MHNYTEIyKwp3QWZkc3g1bm5HZGd5Zmc0dXJXMlZtMTVEaEd2STdDL250cTBkWXkyNE4vVWJHN1VEWHZseUxJSkZxMVhQN25mCnBaRzBWQ2paNjlibXhLbTNaOG0wL0F3TXZpOGU5ZWR5OHY5a05CQ3dMR2tIYkE4WW85Q0lpUWdlbGZwcDF2VWgKYm5OQmhhRCtpdTZDZmlDTHdnSmIvaXc3ZW8vQ3lvWnF4K3RqWGFPMnpYdm00cC8rUUlmQU9ndEdRTEZVOGNmWgovZ1VyVHE1Z0ZxMCtQOUd5V3NBVEpGNnE3TDZXWlpqME91VHNlN2Y0Q1NpajZNbk9NTXhBK0pvYWhKejdsc1NpClRKSEl3RXA1ci9SeWhweWVwUXhGWWNVSDVKSmY5cmFoWExXWmkrOVRqeFNNMll5aHhmUlBzaVVFdUdEb2s3OFEKbS9RUGlDaTlKSmIxb2NtVGpBVjh4RFNob2NpdlhPRnlobjZMbjc3dkx
qWStBYXZ0V0RoUXRocHVQeHNMdFZ6bQplMFNIMTFkRUxSdGI3NG1xWE9yTzdmdS8rSUJzM0pxTEUvVSt4dXhRdHZHOHZHMXlES0hIU1pxUzJoL1dzNGw0Ck5pQXNoSGdlaFFEUEJjWTl3WVl6ZkJnWnBPVU16ZERmNTB4K0ZTbFk0M1dPSkp6U3VRaDR5WjArM2t5Z3VDRjgKcm5NTFNjZXlTNGNpNExtSi9LQ1N1R2RmNlhWWXo4QkU5Z2pqanBDUDZxeTBVbFJlZldzL2lnL3djSysyYkYxVApuL1l2KzZnWGVDVEhKNzVxRElQbHA3RFJVVWswZmJNajRiSWthb2dXV2s0emYydThteFpMYTBsZVBLTktaTi9tCkdDdkZ3cjNlaSt1LzhjenA1RjdUCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K" - }, - ) - self.assertEqual(dec_mc_2, ground_truth_mc_2) - - def test_set_up_node_resource_group(self): - # default value in `aks_create` - dec_1 = AKSPreviewCreateDecorator( - self.cmd, - self.client, - { - "node_resource_group": None, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_1 = self.models.ManagedCluster(location="test_location") - # fail on passing the wrong mc object - with self.assertRaises(CLIInternalError): - dec_1.set_up_node_resource_group(None) - dec_mc_1 = dec_1.set_up_node_resource_group(mc_1) - ground_truth_mc_1 = self.models.ManagedCluster( - location="test_location") - self.assertEqual(dec_mc_1, ground_truth_mc_1) - - # custom value - dec_2 = AKSPreviewCreateDecorator( - self.cmd, - self.client, - {"node_resource_group": "test_node_resource_group"}, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_2 = self.models.ManagedCluster(location="test_location") - dec_mc_2 = dec_2.set_up_node_resource_group(mc_2) - ground_truth_mc_2 = self.models.ManagedCluster( - location="test_location", - node_resource_group="test_node_resource_group", - ) - self.assertEqual(dec_mc_2, ground_truth_mc_2) - - def test_set_up_network_profile(self): - # default value in `aks_create` - dec_1 = AKSPreviewCreateDecorator( - self.cmd, - self.client, - { - "load_balancer_sku": None, - "load_balancer_managed_outbound_ip_count": None, - "load_balancer_outbound_ips": None, - "load_balancer_outbound_ip_prefixes": None, - "load_balancer_outbound_ports": None, - "load_balancer_idle_timeout": None, - "outbound_type": None, - "network_plugin": None, - "pod_cidr": 
None, - "service_cidr": None, - "dns_service_ip": None, - "docker_bridge_cidr": None, - "network_policy": None, - "nat_gateway_managed_outbound_ip_count": None, - "nat_gateway_idle_timeout": None, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - - mc_1 = self.models.ManagedCluster(location="test_location") - # fail on passing the wrong mc object - with self.assertRaises(CLIInternalError): - dec_1.set_up_network_profile(None) - dec_mc_1 = dec_1.set_up_network_profile(mc_1) - - network_profile_1 = self.models.ContainerServiceNetworkProfile( - network_plugin="kubenet", # default value in SDK - pod_cidr="10.244.0.0/16", # default value in SDK - service_cidr="10.0.0.0/16", # default value in SDK - dns_service_ip="10.0.0.10", # default value in SDK - docker_bridge_cidr="172.17.0.1/16", # default value in SDK - load_balancer_sku="standard", - outbound_type="loadBalancer", - ) - ground_truth_mc_1 = self.models.ManagedCluster( - location="test_location", network_profile=network_profile_1 - ) - self.assertEqual(dec_mc_1, ground_truth_mc_1) - - # custom value - dec_2 = AKSPreviewCreateDecorator( - self.cmd, - self.client, - { - "load_balancer_sku": None, - "load_balancer_managed_outbound_ip_count": None, - "load_balancer_outbound_ips": None, - "load_balancer_outbound_ip_prefixes": None, - "load_balancer_outbound_ports": None, - "load_balancer_idle_timeout": None, - "outbound_type": None, - "network_plugin": "kubenet", - "pod_cidr": "10.246.0.0/16", - "service_cidr": None, - "dns_service_ip": None, - "docker_bridge_cidr": None, - "network_policy": None, - "nat_gateway_managed_outbound_ip_count": 10, - "nat_gateway_idle_timeout": 20, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_2 = self.models.ManagedCluster(location="test_location") - dec_mc_2 = dec_2.set_up_network_profile(mc_2) - - nat_gateway_profile_2 = self.models.nat_gateway_models.ManagedClusterNATGatewayProfile( - managed_outbound_ip_profile=self.models.nat_gateway_models.ManagedClusterManagedOutboundIPProfile( - count=10 - ), - 
idle_timeout_in_minutes=20, - ) - network_profile_2 = self.models.ContainerServiceNetworkProfile( - network_plugin="kubenet", - pod_cidr="10.246.0.0/16", - service_cidr=None, # overwritten to None - dns_service_ip=None, # overwritten to None - docker_bridge_cidr=None, # overwritten to None - load_balancer_sku="standard", - outbound_type="loadBalancer", - nat_gateway_profile=nat_gateway_profile_2, - ) - ground_truth_mc_2 = self.models.ManagedCluster( - location="test_location", network_profile=network_profile_2 - ) - self.assertEqual(dec_mc_2, ground_truth_mc_2) - - # dual-stack - dec_3 = AKSPreviewCreateDecorator( - self.cmd, - self.client, - { - "load_balancer_sku": None, - "load_balancer_managed_outbound_ip_count": None, - "load_balancer_managed_outbound_ipv6_count": 3, - "load_balancer_outbound_ips": None, - "load_balancer_outbound_ip_prefixes": None, - "load_balancer_outbound_ports": None, - "load_balancer_idle_timeout": None, - "outbound_type": None, - "network_plugin": "kubenet", - "pod_cidr": None, - "service_cidr": None, - "pod_cidrs": "10.246.0.0/16,2001:abcd::/64", - "service_cidrs": "10.0.0.0/16,2001:ffff::/108", - "ip_families": "IPv4,IPv6", - "dns_service_ip": None, - "docker_bridge_cidr": None, - "network_policy": None, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_3 = self.models.ManagedCluster(location="test_location") - dec_mc_3 = dec_3.set_up_network_profile(mc_3) - - network_profile_3 = self.models.ContainerServiceNetworkProfile( - network_plugin="kubenet", - pod_cidr=None, # overwritten to None - service_cidr=None, # overwritten to None - dns_service_ip=None, # overwritten to None - docker_bridge_cidr=None, # overwritten to None - load_balancer_sku="standard", - outbound_type="loadBalancer", - ip_families=["IPv4", "IPv6"], - pod_cidrs=["10.246.0.0/16", "2001:abcd::/64"], - service_cidrs=["10.0.0.0/16", "2001:ffff::/108"], - ) - load_balancer_profile = self.models.lb_models.get( - "ManagedClusterLoadBalancerProfile" - )( - 
managed_outbound_i_ps=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfileManagedOutboundIPs" - )( - count=1, - count_ipv6=3, - ) - ) - - network_profile_3.load_balancer_profile = load_balancer_profile - - ground_truth_mc_3 = self.models.ManagedCluster( - location="test_location", network_profile=network_profile_3 - ) - self.assertEqual(dec_mc_3, ground_truth_mc_3) - - def test_set_up_pod_security_policy(self): - # default value in `aks_create` - dec_1 = AKSPreviewCreateDecorator( - self.cmd, - self.client, - { - "enable_pod_security_policy": False, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_1 = self.models.ManagedCluster(location="test_location") - # fail on passing the wrong mc object - with self.assertRaises(CLIInternalError): - dec_1.set_up_pod_security_policy(None) - dec_mc_1 = dec_1.set_up_pod_security_policy(mc_1) - ground_truth_mc_1 = self.models.ManagedCluster( - location="test_location", enable_pod_security_policy=False - ) - self.assertEqual(dec_mc_1, ground_truth_mc_1) - - # custom value - dec_2 = AKSPreviewCreateDecorator( - self.cmd, - self.client, - {"enable_pod_security_policy": True}, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_2 = self.models.ManagedCluster(location="test_location") - dec_mc_2 = dec_2.set_up_pod_security_policy(mc_2) - ground_truth_mc_2 = self.models.ManagedCluster( - location="test_location", - enable_pod_security_policy=True, - ) - self.assertEqual(dec_mc_2, ground_truth_mc_2) - - def test_set_up_pod_identity_profile(self): - # default value in `aks_create` - dec_1 = AKSPreviewCreateDecorator( - self.cmd, - self.client, - { - "enable_pod_identity": False, - "enable_pod_identity_with_kubenet": False, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_1 = self.models.ManagedCluster(location="test_location") - # fail on passing the wrong mc object - with self.assertRaises(CLIInternalError): - dec_1.set_up_pod_identity_profile(None) - dec_mc_1 = dec_1.set_up_pod_identity_profile(mc_1) - ground_truth_mc_1 = self.models.ManagedCluster( - 
location="test_location") - self.assertEqual(dec_mc_1, ground_truth_mc_1) - - # custom value - dec_2 = AKSPreviewCreateDecorator( - self.cmd, - self.client, - { - "enable_managed_identity": True, - "enable_pod_identity": True, - "enable_pod_identity_with_kubenet": True, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - network_profile_2 = self.models.ContainerServiceNetworkProfile( - network_plugin="kubenet" - ) - mc_2 = self.models.ManagedCluster( - location="test_location", network_profile=network_profile_2 - ) - dec_mc_2 = dec_2.set_up_pod_identity_profile(mc_2) - network_profile_2 = self.models.ContainerServiceNetworkProfile( - network_plugin="kubenet" - ) - pod_identity_profile_2 = ( - self.models.pod_identity_models.ManagedClusterPodIdentityProfile( - enabled=True, - allow_network_plugin_kubenet=True, - ) - ) - ground_truth_mc_2 = self.models.ManagedCluster( - location="test_location", - network_profile=network_profile_2, - pod_identity_profile=pod_identity_profile_2, - ) - self.assertEqual(dec_mc_2, ground_truth_mc_2) - - def test_build_monitoring_addon_profile(self): - # default - dec_1 = AKSPreviewCreateDecorator( - self.cmd, - self.client, - { - "resource_group_name": "test_rg_name", - "name": "test_name", - "location": "test_location", - "enable_addons": "monitoring", - "workspace_resource_id": "test_workspace_resource_id", - "enable_msi_auth_for_monitoring": False, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - dec_1.context.set_intermediate( - "subscription_id", "test_subscription_id" - ) - - with patch( - "azext_aks_preview.decorator.ensure_container_insights_for_monitoring", - return_value=None, - ): - self.assertEqual( - dec_1.context.get_intermediate("monitoring"), None) - monitoring_addon_profile = dec_1.build_monitoring_addon_profile() - ground_truth_monitoring_addon_profile = self.models.ManagedClusterAddonProfile( - enabled=True, - config={ - CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: "/test_workspace_resource_id", - CONST_MONITORING_USING_AAD_MSI_AUTH: 
False, - }, - ) - self.assertEqual( - monitoring_addon_profile, ground_truth_monitoring_addon_profile - ) - self.assertEqual( - dec_1.context.get_intermediate("monitoring"), True) - - # custom value - dec_2 = AKSPreviewCreateDecorator( - self.cmd, - self.client, - { - "resource_group_name": "test_rg_name", - "name": "test_name", - "location": "test_location", - "enable_addons": "monitoring", - "workspace_resource_id": "test_workspace_resource_id", - "enable_msi_auth_for_monitoring": True, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - dec_2.context.set_intermediate( - "subscription_id", "test_subscription_id" - ) - - with patch( - "azext_aks_preview.decorator.ensure_container_insights_for_monitoring", - return_value=None, - ): - self.assertEqual( - dec_2.context.get_intermediate("monitoring"), None) - monitoring_addon_profile = dec_2.build_monitoring_addon_profile() - ground_truth_monitoring_addon_profile = self.models.ManagedClusterAddonProfile( - enabled=True, - config={ - CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: "/test_workspace_resource_id", - CONST_MONITORING_USING_AAD_MSI_AUTH: True, - }, - ) - self.assertEqual( - monitoring_addon_profile, ground_truth_monitoring_addon_profile - ) - self.assertEqual( - dec_2.context.get_intermediate("monitoring"), True) - - def test_build_ingress_appgw_addon_profile(self): - # default - dec_1 = AKSPreviewCreateDecorator( - self.cmd, - self.client, - {}, - CUSTOM_MGMT_AKS_PREVIEW, - ) - - self.assertEqual( - dec_1.context.get_intermediate("ingress_appgw_addon_enabled"), None - ) - ingress_appgw_addon_profile = dec_1.build_ingress_appgw_addon_profile() - ground_truth_ingress_appgw_addon_profile = ( - self.models.ManagedClusterAddonProfile( - enabled=True, - config={}, - ) - ) - self.assertEqual( - ingress_appgw_addon_profile, - ground_truth_ingress_appgw_addon_profile, - ) - self.assertEqual( - dec_1.context.get_intermediate("ingress_appgw_addon_enabled"), True - ) - - # custom value - dec_2 = AKSPreviewCreateDecorator( - 
self.cmd, - self.client, - { - "appgw_name": "test_appgw_name", - "appgw_subnet_prefix": "test_appgw_subnet_prefix", - "appgw_id": "test_appgw_id", - "appgw_subnet_id": "test_appgw_subnet_id", - "appgw_watch_namespace": "test_appgw_watch_namespace", - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - - self.assertEqual( - dec_2.context.get_intermediate("ingress_appgw_addon_enabled"), None - ) - ingress_appgw_addon_profile = dec_2.build_ingress_appgw_addon_profile() - ground_truth_ingress_appgw_addon_profile = self.models.ManagedClusterAddonProfile( - enabled=True, - config={ - CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME: "test_appgw_name", - CONST_INGRESS_APPGW_SUBNET_CIDR: "test_appgw_subnet_prefix", - CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID: "test_appgw_id", - CONST_INGRESS_APPGW_SUBNET_ID: "test_appgw_subnet_id", - CONST_INGRESS_APPGW_WATCH_NAMESPACE: "test_appgw_watch_namespace", - }, - ) - self.assertEqual( - ingress_appgw_addon_profile, - ground_truth_ingress_appgw_addon_profile, - ) - self.assertEqual( - dec_2.context.get_intermediate("ingress_appgw_addon_enabled"), True - ) - - # custom value - dec_3 = AKSPreviewCreateDecorator( - self.cmd, - self.client, - { - "appgw_name": "test_appgw_name", - "appgw_subnet_prefix": "test_appgw_subnet_prefix", - "appgw_subnet_cidr": "test_appgw_subnet_cidr", - "appgw_id": "test_appgw_id", - "appgw_subnet_id": "test_appgw_subnet_id", - "appgw_watch_namespace": "test_appgw_watch_namespace", - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - - self.assertEqual( - dec_3.context.get_intermediate("ingress_appgw_addon_enabled"), None - ) - ingress_appgw_addon_profile = dec_3.build_ingress_appgw_addon_profile() - ground_truth_ingress_appgw_addon_profile = self.models.ManagedClusterAddonProfile( - enabled=True, - config={ - CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME: "test_appgw_name", - CONST_INGRESS_APPGW_SUBNET_CIDR: "test_appgw_subnet_cidr", - CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID: "test_appgw_id", - CONST_INGRESS_APPGW_SUBNET_ID: 
"test_appgw_subnet_id", - CONST_INGRESS_APPGW_WATCH_NAMESPACE: "test_appgw_watch_namespace", - }, - ) - self.assertEqual( - ingress_appgw_addon_profile, - ground_truth_ingress_appgw_addon_profile, - ) - self.assertEqual( - dec_3.context.get_intermediate("ingress_appgw_addon_enabled"), True - ) - - def test_build_gitops_addon_profile(self): - # default - dec_1 = AKSPreviewCreateDecorator( - self.cmd, - self.client, - {}, - CUSTOM_MGMT_AKS_PREVIEW, - ) - - gitops_addon_profile = dec_1.build_gitops_addon_profile() - ground_truth_gitops_addon_profile = ( - self.models.ManagedClusterAddonProfile( - enabled=True, - ) - ) - self.assertEqual( - gitops_addon_profile, ground_truth_gitops_addon_profile - ) - - def test_set_up_addon_profiles(self): - # default value in `aks_create` - dec_1 = AKSPreviewCreateDecorator( - self.cmd, - self.client, - { - "enable_addons": None, - "workspace_resource_id": None, - "aci_subnet_name": None, - "appgw_name": None, - "appgw_subnet_cidr": None, - "appgw_id": None, - "appgw_subnet_id": None, - "appgw_watch_namespace": None, - "enable_sgxquotehelper": False, - "enable_secret_rotation": False, - "rotation_poll_interval": None, - "appgw_subnet_prefix": None, - "enable_msi_auth_for_monitoring": False, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - - mc_1 = self.models.ManagedCluster(location="test_location") - # fail on passing the wrong mc object - with self.assertRaises(CLIInternalError): - dec_1.set_up_addon_profiles(None) - dec_mc_1 = dec_1.set_up_addon_profiles(mc_1) - ground_truth_mc_1 = self.models.ManagedCluster( - location="test_location", addon_profiles={} - ) - self.assertEqual(dec_mc_1, ground_truth_mc_1) - self.assertEqual(dec_1.context.get_intermediate("monitoring"), None) - self.assertEqual( - dec_1.context.get_intermediate("enable_virtual_node"), None - ) - self.assertEqual( - dec_1.context.get_intermediate("ingress_appgw_addon_enabled"), None - ) - - # custom value - dec_2 = AKSPreviewCreateDecorator( - self.cmd, - self.client, - { - 
"name": "test_name", - "resource_group_name": "test_rg_name", - "location": "test_location", - "vnet_subnet_id": "test_vnet_subnet_id", - "enable_addons": "monitoring,ingress-appgw,gitops", - "workspace_resource_id": "test_workspace_resource_id", - "enable_msi_auth_for_monitoring": True, - "appgw_name": "test_appgw_name", - "appgw_subnet_prefix": "test_appgw_subnet_prefix", - "appgw_id": "test_appgw_id", - "appgw_subnet_id": "test_appgw_subnet_id", - "appgw_watch_namespace": "test_appgw_watch_namespace", - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - dec_2.context.set_intermediate( - "subscription_id", "test_subscription_id" - ) - mc_2 = self.models.ManagedCluster(location="test_location") - with patch( - "azext_aks_preview.decorator.ensure_container_insights_for_monitoring", - return_value=None, - ): - dec_mc_2 = dec_2.set_up_addon_profiles(mc_2) - - addon_profiles_2 = { - CONST_MONITORING_ADDON_NAME: self.models.ManagedClusterAddonProfile( - enabled=True, - config={ - CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: "/test_workspace_resource_id", - CONST_MONITORING_USING_AAD_MSI_AUTH: True, - }, - ), - CONST_INGRESS_APPGW_ADDON_NAME: self.models.ManagedClusterAddonProfile( - enabled=True, - config={ - CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME: "test_appgw_name", - CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID: "test_appgw_id", - CONST_INGRESS_APPGW_SUBNET_ID: "test_appgw_subnet_id", - CONST_INGRESS_APPGW_SUBNET_CIDR: "test_appgw_subnet_prefix", - CONST_INGRESS_APPGW_WATCH_NAMESPACE: "test_appgw_watch_namespace", - }, - ), - CONST_GITOPS_ADDON_NAME: self.models.ManagedClusterAddonProfile( - enabled=True, - ), - } - ground_truth_mc_2 = self.models.ManagedCluster( - location="test_location", addon_profiles=addon_profiles_2 - ) - self.assertEqual(dec_mc_2, ground_truth_mc_2) - self.assertEqual(dec_2.context.get_intermediate("monitoring"), True) - self.assertEqual( - dec_2.context.get_intermediate("enable_virtual_node"), None - ) - self.assertEqual( - 
dec_2.context.get_intermediate("ingress_appgw_addon_enabled"), True - ) - - def test_set_up_windows_profile(self): - # default value in `aks_create` - dec_1 = AKSPreviewCreateDecorator( - self.cmd, - self.client, - { - "windows_admin_username": None, - "windows_admin_password": None, - "enable_ahub": False, - "enable_windows_gmsa": False, - "gmsa_dns_server": None, - "gmsa_root_domain_name": None, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_1 = self.models.ManagedCluster(location="test_location") - # fail on passing the wrong mc object - with self.assertRaises(CLIInternalError): - dec_1.set_up_windows_profile(None) - dec_mc_1 = dec_1.set_up_windows_profile(mc_1) - - ground_truth_mc_1 = self.models.ManagedCluster( - location="test_location") - self.assertEqual(dec_mc_1, ground_truth_mc_1) - - # custom value - dec_2 = AKSPreviewCreateDecorator( - self.cmd, - self.client, - { - # [SuppressMessage("Microsoft.Security", "CS002:SecretInNextLine", Justification="fake secrets in unit test")] - "windows_admin_username": "test_win_admin_name", - "windows_admin_password": "test_win_admin_password", - "enable_ahub": True, - "enable_windows_gmsa": True, - "gmsa_dns_server": "test_gmsa_dns_server", - "gmsa_root_domain_name": "test_gmsa_root_domain_name", - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_2 = self.models.ManagedCluster(location="test_location") - dec_mc_2 = dec_2.set_up_windows_profile(mc_2) - - windows_gmsa_profile_2 = self.models.WindowsGmsaProfile( - enabled=True, - dns_server="test_gmsa_dns_server", - root_domain_name="test_gmsa_root_domain_name", - ) - windows_profile_2 = self.models.ManagedClusterWindowsProfile( - # [SuppressMessage("Microsoft.Security", "CS002:SecretInNextLine", Justification="fake secrets in unit test")] - admin_username="test_win_admin_name", - admin_password="test_win_admin_password", - license_type="Windows_Server", - gmsa_profile=windows_gmsa_profile_2, - ) - - ground_truth_mc_2 = self.models.ManagedCluster( - location="test_location", 
windows_profile=windows_profile_2 - ) - self.assertEqual(dec_mc_2, ground_truth_mc_2) - - def test_set_up_oidc_issuer_profile__default_value(self): - dec = AKSPreviewCreateDecorator( - self.cmd, self.client, {}, CUSTOM_MGMT_AKS_PREVIEW - ) - mc = self.models.ManagedCluster(location="test_location") - updated_mc = dec.set_up_oidc_issuer_profile(mc) - self.assertIsNone(updated_mc.oidc_issuer_profile) - - def test_set_up_oidc_issuer_profile__enabled(self): - dec = AKSPreviewCreateDecorator( - self.cmd, - self.client, - { - "enable_oidc_issuer": True, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc = self.models.ManagedCluster(location="test_location") - updated_mc = dec.set_up_oidc_issuer_profile(mc) - self.assertIsNotNone(updated_mc.oidc_issuer_profile) - self.assertTrue(updated_mc.oidc_issuer_profile.enabled) - - def test_set_up_oidc_issuer_profile__enabled_mc_enabled(self): - dec = AKSPreviewCreateDecorator( - self.cmd, - self.client, - { - "enable_oidc_issuer": True, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc = self.models.ManagedCluster(location="test_location") - mc.oidc_issuer_profile = self.models.ManagedClusterOIDCIssuerProfile( - enabled=True - ) - updated_mc = dec.set_up_oidc_issuer_profile(mc) - self.assertIsNotNone(updated_mc.oidc_issuer_profile) - self.assertTrue(updated_mc.oidc_issuer_profile.enabled) - - def test_set_up_workload_identity_profile__default_value(self): - dec = AKSPreviewCreateDecorator( - self.cmd, self.client, {}, CUSTOM_MGMT_AKS_PREVIEW - ) - mc = self.models.ManagedCluster(location="test_location") - updated_mc = dec.set_up_workload_identity_profile(mc) - self.assertIsNone(updated_mc.security_profile) - - def test_set_up_workload_identity_profile__default_value_with_security_profile(self): - dec = AKSPreviewCreateDecorator( - self.cmd, self.client, {}, CUSTOM_MGMT_AKS_PREVIEW - ) - mc = self.models.ManagedCluster(location="test_location") - mc.security_profile = self.models.ManagedClusterSecurityProfile() - updated_mc = 
dec.set_up_workload_identity_profile(mc) - self.assertIsNone(updated_mc.security_profile.workload_identity) - - def test_set_up_workload_identity_profile__enabled(self): - dec = AKSPreviewCreateDecorator( - self.cmd, self.client, - { - "enable_oidc_issuer": True, - "enable_workload_identity": True, - }, - CUSTOM_MGMT_AKS_PREVIEW - ) - mc = self.models.ManagedCluster(location="test_location") - updated_mc = dec.set_up_workload_identity_profile(mc) - self.assertTrue(updated_mc.security_profile.workload_identity.enabled) - - def test_set_up_azure_keyvault_kms(self): - key_id_1 = "https://fakekeyvault.vault.azure.net/secrets/fakekeyname/fakekeyversion" - - dec_1 = AKSPreviewCreateDecorator( - self.cmd, - self.client, - {}, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_1 = self.models.ManagedCluster( - location="test_location" - ) - dec_mc_1 = dec_1.set_up_azure_keyvault_kms(mc_1) - ground_truth_mc_1 = self.models.ManagedCluster( - location="test_location" - ) - self.assertEqual(dec_mc_1, ground_truth_mc_1) - - dec_2 = AKSPreviewCreateDecorator( - self.cmd, - self.client, - { - "enable_azure_keyvault_kms": True, - "azure_keyvault_kms_key_id": key_id_1, - "azure_keyvault_kms_key_vault_network_access": "Public", - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_2 = self.models.ManagedCluster(location="test_location") - dec_mc_2 = dec_2.set_up_azure_keyvault_kms(mc_2) - - ground_truth_azure_keyvault_kms_profile_2 = self.models.AzureKeyVaultKms( - enabled=True, - key_id=key_id_1, - key_vault_network_access="Public", - ) - ground_truth_security_profile_2 = self.models.ManagedClusterSecurityProfile( - azure_key_vault_kms=ground_truth_azure_keyvault_kms_profile_2, - ) - ground_truth_mc_2 = self.models.ManagedCluster( - location="test_location", - security_profile=ground_truth_security_profile_2, - ) - - self.assertEqual(dec_mc_2, ground_truth_mc_2) - - dec_3 = AKSPreviewCreateDecorator( - self.cmd, - self.client, - { - "enable_azure_keyvault_kms": True, - "azure_keyvault_kms_key_id": key_id_1, - 
"azure_keyvault_kms_key_vault_network_access": "Private", - "azure_keyvault_kms_key_vault_resource_id": "/subscriptions/8ecadfc9-d1a3-4ea4-b844-0d9f87e4d7c8/resourceGroups/foo/providers/Microsoft.KeyVault/vaults/foo", - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_3 = self.models.ManagedCluster(location="test_location") - dec_mc_3 = dec_3.set_up_azure_keyvault_kms(mc_3) - - ground_truth_azure_keyvault_kms_profile_3 = self.models.AzureKeyVaultKms( - enabled=True, - key_id=key_id_1, - key_vault_network_access="Private", - key_vault_resource_id="/subscriptions/8ecadfc9-d1a3-4ea4-b844-0d9f87e4d7c8/resourceGroups/foo/providers/Microsoft.KeyVault/vaults/foo", - ) - ground_truth_security_profile_3 = self.models.ManagedClusterSecurityProfile( - azure_key_vault_kms=ground_truth_azure_keyvault_kms_profile_3, - ) - ground_truth_mc_3 = self.models.ManagedCluster( - location="test_location", - security_profile=ground_truth_security_profile_3, - ) - - self.assertEqual(dec_mc_3, ground_truth_mc_3) - - def test_set_up_api_server_access_profile(self): - dec_1 = AKSPreviewCreateDecorator( - self.cmd, - self.client, - {}, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_1 = self.models.ManagedCluster( - location="test_location" - ) - dec_mc_1 = dec_1.set_up_api_server_access_profile(mc_1) - ground_truth_mc_1 = self.models.ManagedCluster( - location="test_location" - ) - self.assertEqual(dec_mc_1, ground_truth_mc_1) - - apiserver_subnet_id = "/subscriptions/fakesub/resourceGroups/fakerg/providers/Microsoft.Network/virtualNetworks/fakevnet/subnets/apiserver" - vnet_subnet_id = "/subscriptions/fakesub/resourceGroups/fakerg/providers/Microsoft.Network/virtualNetworks/fakevnet/subnets/node" - dec_2 = AKSPreviewCreateDecorator( - self.cmd, - self.client, - { - "enable_apiserver_vnet_integration": True, - "enable_private_cluster": True, - "apiserver_subnet_id": apiserver_subnet_id, - "vnet_subnet_id": vnet_subnet_id, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_2 = 
self.models.ManagedCluster(location="test_location") - dec_mc_2 = dec_2.set_up_api_server_access_profile(mc_2) - ground_truth_api_server_access_profile_2 = self.models.ManagedClusterAPIServerAccessProfile( - enable_vnet_integration=True, - subnet_id=apiserver_subnet_id, - enable_private_cluster=True, - authorized_ip_ranges=[], - ) - ground_truth_mc_2 = self.models.ManagedCluster( - location="test_location", - api_server_access_profile=ground_truth_api_server_access_profile_2, - ) - self.assertEqual(dec_mc_2, ground_truth_mc_2) - - def test_set_up_creationdata_of_cluster_snapshot(self): - dec_1 = AKSPreviewCreateDecorator( - self.cmd, - self.client, - { - "cluster_snapshot_id": "test_cluster_snapshot_id", - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_1 = self.models.ManagedCluster(location="test_location") - dec_mc_1 = dec_1.set_up_creationdata_of_cluster_snapshot(mc_1) - cd = self.models.CreationData( - source_resource_id="test_cluster_snapshot_id" - ) - ground_truth_mc_1 = self.models.ManagedCluster( - location="test_location", creation_data=cd) - self.assertEqual(dec_mc_1, ground_truth_mc_1) - - def test_set_up_workload_auto_scaler_profile(self): - # Throws exception when incorrect mc object is passed. - dec = AKSPreviewCreateDecorator(self.cmd, self.client, {}, CUSTOM_MGMT_AKS_PREVIEW) - with self.assertRaisesRegex(CLIInternalError, "^Unexpected mc object with type ''\.$"): - dec.set_up_workload_auto_scaler_profile(None) - - # Sets profile to None without raw parameters. - dec = AKSPreviewCreateDecorator(self.cmd, self.client, {}, CUSTOM_MGMT_AKS_PREVIEW) - mc_in = self.models.ManagedCluster(location="test_location") - mc_out = dec.set_up_workload_auto_scaler_profile(mc_in) - self.assertEqual(mc_out, mc_in) - self.assertIsNone(mc_out.workload_auto_scaler_profile) - - # Sets profile to None if enable_keda is False. 
- dec = AKSPreviewCreateDecorator(self.cmd, self.client, {"enable_keda": False}, CUSTOM_MGMT_AKS_PREVIEW) - mc_in = self.models.ManagedCluster(location="test_location") - mc_out = dec.set_up_workload_auto_scaler_profile(mc_in) - self.assertEqual(mc_out, mc_in) - self.assertIsNone(mc_out.workload_auto_scaler_profile) - - # Sets profile with keda enabled if enable_keda is True. - dec = AKSPreviewCreateDecorator(self.cmd, self.client, {"enable_keda": True}, CUSTOM_MGMT_AKS_PREVIEW) - mc_in = self.models.ManagedCluster(location="test_location") - mc_out = dec.set_up_workload_auto_scaler_profile(mc_in) - self.assertEqual(mc_out, mc_in) - self.assertIsNotNone(mc_out.workload_auto_scaler_profile) - self.assertIsNotNone(mc_out.workload_auto_scaler_profile.keda) - self.assertTrue(mc_out.workload_auto_scaler_profile.keda.enabled) - - def test_construct_mc_preview_profile(self): - import inspect - - import paramiko - from azext_aks_preview.custom import aks_create - from azure.cli.command_modules.acs.decorator import AKSParamDict - - optional_params = {} - positional_params = [] - for _, v in inspect.signature(aks_create).parameters.items(): - if v.default != v.empty: - optional_params[v.name] = v.default - else: - positional_params.append(v.name) - ground_truth_positional_params = [ - "cmd", - "client", - "resource_group_name", - "name", - "ssh_key_value", - ] - self.assertEqual(positional_params, ground_truth_positional_params) - - # prepare ssh key - key = paramiko.RSAKey.generate(2048) - public_key = "{} {}".format(key.get_name(), key.get_base64()) - - # prepare a dictionary of default parameters - raw_param_dict = { - "resource_group_name": "test_rg_name", - "name": "test_name", - "ssh_key_value": public_key, - } - raw_param_dict.update(optional_params) - raw_param_dict = AKSParamDict(raw_param_dict) - - # default value in `aks_create` - dec_1 = AKSPreviewCreateDecorator( - self.cmd, self.client, raw_param_dict, CUSTOM_MGMT_AKS_PREVIEW - ) - - mock_profile = Mock( - 
get_subscription_id=Mock(return_value="1234-5678-9012") - ) - with patch( - "azure.cli.command_modules.acs.decorator.get_rg_location", - return_value="test_location", - ), patch( - "azure.cli.command_modules.acs.decorator.Profile", - return_value=mock_profile, - ): - dec_mc_1 = dec_1.construct_mc_preview_profile() - - agent_pool_profile_1 = self.models.ManagedClusterAgentPoolProfile( - # Must be 12 chars or less before ACS RP adds to it - name="nodepool1", - # tags=None, - # node_labels=None, - count=3, - vm_size="Standard_DS2_v2", - os_type="Linux", - enable_node_public_ip=False, - enable_encryption_at_host=False, - enable_ultra_ssd=False, - type="VirtualMachineScaleSets", - mode="System", - enable_auto_scaling=False, - enable_fips=False, - enable_custom_ca_trust=False, - ) - ssh_config_1 = self.models.ContainerServiceSshConfiguration( - public_keys=[ - self.models.ContainerServiceSshPublicKey(key_data=public_key) - ] - ) - linux_profile_1 = self.models.ContainerServiceLinuxProfile( - admin_username="azureuser", ssh=ssh_config_1 - ) - network_profile_1 = self.models.ContainerServiceNetworkProfile( - load_balancer_sku="standard", - ) - identity_1 = self.models.ManagedClusterIdentity(type="SystemAssigned") - - storage_profile_1 = self.models.ManagedClusterStorageProfile( - disk_csi_driver = None, - file_csi_driver = None, - snapshot_controller = None, - ) - - ground_truth_mc_1 = self.models.ManagedCluster( - location="test_location", - dns_prefix="testname-testrgname-1234-5", - kubernetes_version="", - addon_profiles={}, - enable_rbac=True, - agent_pool_profiles=[agent_pool_profile_1], - linux_profile=linux_profile_1, - network_profile=network_profile_1, - identity=identity_1, - disable_local_accounts=False, - enable_pod_security_policy=False, - storage_profile=storage_profile_1, - ) - self.assertEqual(dec_mc_1, ground_truth_mc_1) - raw_param_dict.print_usage_statistics() - - def test_create_mc_preview(self): - mc_1 = self.models.ManagedCluster( - 
location="test_location", - addon_profiles={ - CONST_MONITORING_ADDON_NAME: self.models.ManagedClusterAddonProfile( - enabled=True, - config={ - CONST_MONITORING_USING_AAD_MSI_AUTH: True, - }, - ) - }, - ) - dec_1 = AKSPreviewCreateDecorator( - self.cmd, - self.client, - { - "resource_group_name": "test_rg_name", - "name": "test_name", - "enable_managed_identity": True, - # "enable_msi_auth_for_monitoring": True, - "no_wait": False, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - dec_1.context.attach_mc(mc_1) - dec_1.context.set_intermediate( - "monitoring", True, overwrite_exists=True - ) - dec_1.context.set_intermediate( - "subscription_id", "test_subscription_id", overwrite_exists=True - ) - - # raise exception - err_1 = HttpResponseError( - message="not found in Active Directory tenant" - ) - # fail on mock HttpResponseError, max retry exceeded - with self.assertRaises(AzCLIError), patch("time.sleep"), patch( - "azure.cli.command_modules.acs.decorator.AKSCreateDecorator.create_mc" - ), patch( - "azext_aks_preview.decorator.ensure_container_insights_for_monitoring", - side_effect=err_1, - ) as ensure_monitoring: - dec_1.create_mc_preview(mc_1) - ensure_monitoring.assert_called_with( - self.cmd, - mc_1.addon_profiles[CONST_MONITORING_ADDON_NAME], - "test_subscription_id", - "test_rg_name", - "test_name", - "test_location", - remove_monitoring=False, - aad_route=True, - create_dcr=False, - create_dcra=True, - ) - - # raise exception - resp = Mock( - reason="error reason", - status_code=500, - text=Mock(return_value="error text"), - ) - err_2 = HttpResponseError(response=resp) - # fail on mock HttpResponseError - with self.assertRaises(HttpResponseError), patch("time.sleep",), patch( - "azure.cli.command_modules.acs.decorator.AKSCreateDecorator.create_mc" - ), patch( - "azext_aks_preview.decorator.ensure_container_insights_for_monitoring", - side_effect=[err_1, err_2], - ): - dec_1.create_mc_preview(mc_1) - - # return mc - with patch( - 
"azure.cli.command_modules.acs.decorator.AKSCreateDecorator.create_mc", - return_value=mc_1, - ), patch( - "azext_aks_preview.decorator.ensure_container_insights_for_monitoring", - ): - self.assertEqual(dec_1.create_mc_preview(mc_1), mc_1) - - -class AKSPreviewUpdateDecoratorTestCase(unittest.TestCase): - def setUp(self): - # manually register CUSTOM_MGMT_AKS_PREVIEW - register_aks_preview_resource_type() - self.cli_ctx = MockCLI() - self.cmd = MockCmd(self.cli_ctx) - self.models = AKSPreviewModels(self.cmd, CUSTOM_MGMT_AKS_PREVIEW) - self.client = MockClient() - - def test_check_raw_parameters(self): - # default value in `aks_create` - dec_1 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - {}, - CUSTOM_MGMT_AKS_PREVIEW, - ) - # fail on no updated parameter provided - with patch( - "azext_aks_preview.decorator.prompt_y_n", - return_value=False, - ),self.assertRaises(RequiredArgumentMissingError): - dec_1.check_raw_parameters() - - # unless user says they want to reconcile - with patch( - "azext_aks_preview.decorator.prompt_y_n", - return_value=True, - ): - dec_1.check_raw_parameters() - - # custom value - dec_2 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - { - "cluster_autoscaler_profile": {}, - "api_server_authorized_ip_ranges": "", - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - dec_2.check_raw_parameters() - - def test_update_load_balancer_profile(self): - # default value in `aks_update` - dec_1 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - { - "load_balancer_sku": None, - "load_balancer_managed_outbound_ip_count": None, - "load_balancer_managed_outbound_ipv6_count": None, - "load_balancer_outbound_ips": None, - "load_balancer_outbound_ip_prefixes": None, - "load_balancer_outbound_ports": None, - "load_balancer_idle_timeout": None, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - # fail on passing the wrong mc object - with self.assertRaises(CLIInternalError): - dec_1.update_load_balancer_profile(None) - - mc_1 = self.models.ManagedCluster( - 
location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile(), - ) - dec_1.context.attach_mc(mc_1) - dec_mc_1 = dec_1.update_load_balancer_profile(mc_1) - - ground_truth_mc_1 = self.models.ManagedCluster( - location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile(), - ) - self.assertEqual(dec_mc_1, ground_truth_mc_1) - - # custom value - outbound ip prefixes - dec_2 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - { - "load_balancer_managed_outbound_ip_count": None, - "load_balancer_managed_outbound_ipv6_count": None, - "load_balancer_outbound_ips": None, - "load_balancer_outbound_ip_prefixes": "id3,id4", - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_2 = self.models.ManagedCluster( - location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - load_balancer_profile=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfile" - )( - outbound_ip_prefixes=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfileOutboundIPPrefixes" - )( - public_ip_prefixes=[ - self.models.lb_models.get("ResourceReference")( - id="id1" - ), - self.models.lb_models.get("ResourceReference")( - id="id2" - ), - ] - ) - ) - ), - ) - dec_2.context.attach_mc(mc_2) - dec_mc_2 = dec_2.update_load_balancer_profile(mc_2) - - ground_truth_mc_2 = self.models.ManagedCluster( - location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - load_balancer_profile=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfile" - )( - outbound_ip_prefixes=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfileOutboundIPPrefixes" - )( - public_ip_prefixes=[ - self.models.lb_models.get("ResourceReference")( - id="id3" - ), - self.models.lb_models.get("ResourceReference")( - id="id4" - ), - ] - ) - ) - ), - ) - self.assertEqual(dec_mc_2, ground_truth_mc_2) - - # custom value - outbound ip - dec_3 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - { - 
"load_balancer_managed_outbound_ip_count": None, - "load_balancer_managed_outbound_ipv6_count": None, - "load_balancer_outbound_ips": "id3,id4", - "load_balancer_outbound_ip_prefixes": None, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_3 = self.models.ManagedCluster( - location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - load_balancer_profile=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfile" - )( - outbound_i_ps=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfileOutboundIPs" - )( - public_i_ps=[ - self.models.lb_models.get("ResourceReference")( - id="id1" - ), - self.models.lb_models.get("ResourceReference")( - id="id2" - ), - ] - ) - ) - ), - ) - dec_3.context.attach_mc(mc_3) - dec_mc_3 = dec_3.update_load_balancer_profile(mc_3) - - ground_truth_mc_3 = self.models.ManagedCluster( - location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - load_balancer_profile=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfile" - )( - outbound_i_ps=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfileOutboundIPs" - )( - public_i_ps=[ - self.models.lb_models.get("ResourceReference")( - id="id3" - ), - self.models.lb_models.get("ResourceReference")( - id="id4" - ), - ] - ) - ) - ), - ) - self.assertEqual(dec_mc_3, ground_truth_mc_3) - - # custom value - managed outbound ip, count only - dec_4 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - { - "load_balancer_managed_outbound_ip_count": 5, - "load_balancer_managed_outbound_ipv6_count": None, - "load_balancer_outbound_ips": None, - "load_balancer_outbound_ip_prefixes": None, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - - mc_4 = self.models.ManagedCluster( - location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - load_balancer_profile=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfile" - )( - managed_outbound_i_ps=self.models.lb_models.get( - 
"ManagedClusterLoadBalancerProfileManagedOutboundIPs" - )(count=10, count_ipv6=20), - ) - ), - ) - dec_4.context.attach_mc(mc_4) - dec_mc_4 = dec_4.update_load_balancer_profile(mc_4) - - ground_truth_mc_4 = self.models.ManagedCluster( - location="test_location", - network_profile=( - self.models.ContainerServiceNetworkProfile( - load_balancer_profile=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfile" - )( - managed_outbound_i_ps=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfileManagedOutboundIPs" - )(count=5, count_ipv6=20), - ) - ) - ), - ) - self.assertEqual(dec_mc_4, ground_truth_mc_4) - - # custom value - managed outbound ip, count_ipv6 only - dec_5 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - { - "load_balancer_managed_outbound_ip_count": None, - "load_balancer_managed_outbound_ipv6_count": 5, - "load_balancer_outbound_ips": None, - "load_balancer_outbound_ip_prefixes": None, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - - mc_5 = self.models.ManagedCluster( - location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - load_balancer_profile=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfile" - )( - managed_outbound_i_ps=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfileManagedOutboundIPs" - )(count=10, count_ipv6=20), - ) - ), - ) - dec_5.context.attach_mc(mc_5) - dec_mc_5 = dec_5.update_load_balancer_profile(mc_5) - - ground_truth_mc_5 = self.models.ManagedCluster( - location="test_location", - network_profile=( - self.models.ContainerServiceNetworkProfile( - load_balancer_profile=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfile" - )( - managed_outbound_i_ps=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfileManagedOutboundIPs" - )(count=10, count_ipv6=5), - ) - ) - ), - ) - self.assertEqual(dec_mc_5, ground_truth_mc_5) - - # custom value - managed outbound ip - dec_6 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - { - 
"load_balancer_managed_outbound_ip_count": 25, - "load_balancer_managed_outbound_ipv6_count": 5, - "load_balancer_outbound_ips": None, - "load_balancer_outbound_ip_prefixes": None, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - - mc_6 = self.models.ManagedCluster( - location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - load_balancer_profile=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfile" - )( - managed_outbound_i_ps=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfileManagedOutboundIPs" - )(count=10, count_ipv6=20), - ) - ), - ) - dec_6.context.attach_mc(mc_6) - dec_mc_6 = dec_6.update_load_balancer_profile(mc_6) - - ground_truth_mc_6 = self.models.ManagedCluster( - location="test_location", - network_profile=( - self.models.ContainerServiceNetworkProfile( - load_balancer_profile=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfile" - )( - managed_outbound_i_ps=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfileManagedOutboundIPs" - )(count=25, count_ipv6=5), - ) - ) - ), - ) - self.assertEqual(dec_mc_6, ground_truth_mc_6) - - # custom value - from managed outbound ip to outbound ip - dec_7 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - { - "load_balancer_managed_outbound_ip_count": None, - "load_balancer_managed_outbound_ipv6_count": None, - "load_balancer_outbound_ips": "id1,id2", - "load_balancer_outbound_ip_prefixes": None, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_7 = self.models.ManagedCluster( - location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - load_balancer_profile=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfile" - )( - managed_outbound_i_ps=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfileManagedOutboundIPs" - )(count=3, count_ipv6=2) - ) - ), - ) - dec_7.context.attach_mc(mc_7) - dec_mc_7 = dec_7.update_load_balancer_profile(mc_7) - - ground_truth_mc_7 = self.models.ManagedCluster( - 
location="test_location", - network_profile=( - self.models.ContainerServiceNetworkProfile( - load_balancer_profile=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfile" - )( - outbound_i_ps=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfileOutboundIPs" - )( - public_i_ps=[ - self.models.lb_models.get("ResourceReference")( - id="id1" - ), - self.models.lb_models.get("ResourceReference")( - id="id2" - ), - ] - ) - ) - ) - ), - ) - self.assertEqual(dec_mc_7, ground_truth_mc_7) - - # custom value - from outbound ip prefix to managed outbound ip - dec_8 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - { - "load_balancer_managed_outbound_ip_count": 10, - "load_balancer_managed_outbound_ipv6_count": 5, - "load_balancer_outbound_ips": None, - "load_balancer_outbound_ip_prefixes": None, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - - load_balancer_profile_8 = self.models.lb_models.get( - "ManagedClusterLoadBalancerProfile" - )( - outbound_ip_prefixes=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfileOutboundIPPrefixes" - )( - public_ip_prefixes=[ - self.models.lb_models.get("ResourceReference")( - id="test_public_ip_prefix" - ) - ] - ), - ) - network_profile_8 = self.models.ContainerServiceNetworkProfile( - load_balancer_profile=load_balancer_profile_8 - ) - mc_8 = self.models.ManagedCluster( - location="test_location", network_profile=network_profile_8 - ) - dec_8.context.attach_mc(mc_8) - dec_mc_8 = dec_8.update_load_balancer_profile(mc_8) - - ground_truth_load_balancer_profile_8 = self.models.lb_models.get( - "ManagedClusterLoadBalancerProfile" - )( - managed_outbound_i_ps=self.models.lb_models.get( - "ManagedClusterLoadBalancerProfileManagedOutboundIPs" - )(count=10, count_ipv6=5), - ) - ground_truth_network_profile_8 = ( - self.models.ContainerServiceNetworkProfile( - load_balancer_profile=ground_truth_load_balancer_profile_8 - ) - ) - ground_truth_mc_8 = self.models.ManagedCluster( - location="test_location", - 
network_profile=ground_truth_network_profile_8, - ) - self.assertEqual(dec_mc_8, ground_truth_mc_8) - - # custom value - dec_9 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - {}, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_9 = self.models.ManagedCluster(location="test_location") - dec_9.context.attach_mc(mc_9) - # fail on incomplete mc object (no network profile) - with self.assertRaises(UnknownError): - dec_9.update_load_balancer_profile(mc_9) - - def test_update_pod_security_policy(self): - # default value in `aks_update` - dec_1 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - { - "enable_pod_security_policy": False, - "disable_pod_security_policy": False, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - # fail on passing the wrong mc object - with self.assertRaises(CLIInternalError): - dec_1.update_pod_security_policy(None) - - mc_1 = self.models.ManagedCluster( - location="test_location", - enable_pod_security_policy=True, - ) - dec_1.context.attach_mc(mc_1) - dec_mc_1 = dec_1.update_pod_security_policy(mc_1) - ground_truth_mc_1 = self.models.ManagedCluster( - location="test_location", - enable_pod_security_policy=True, - ) - self.assertEqual(dec_mc_1, ground_truth_mc_1) - - # custom value - dec_2 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - { - "enable_pod_security_policy": True, - "disable_pod_security_policy": False, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_2 = self.models.ManagedCluster( - location="test_location", - enable_pod_security_policy=False, - ) - dec_2.context.attach_mc(mc_2) - dec_mc_2 = dec_2.update_pod_security_policy(mc_2) - ground_truth_mc_2 = self.models.ManagedCluster( - location="test_location", - enable_pod_security_policy=True, - ) - self.assertEqual(dec_mc_2, ground_truth_mc_2) - - # custom value - dec_3 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - { - "enable_pod_security_policy": False, - "disable_pod_security_policy": True, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - - mc_3 = self.models.ManagedCluster( - 
location="test_location", - enable_pod_security_policy=True, - ) - dec_3.context.attach_mc(mc_3) - dec_mc_3 = dec_3.update_pod_security_policy(mc_3) - ground_truth_mc_3 = self.models.ManagedCluster( - location="test_location", - enable_pod_security_policy=False, - ) - self.assertEqual(dec_mc_3, ground_truth_mc_3) - - def test_update_nat_gateway_profile(self): - # default value in `aks_update` - dec_1 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - { - "nat_gateway_managed_outbound_ip_count": None, - "nat_gateway_idle_timeout": None, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - # fail on passing the wrong mc object - with self.assertRaises(CLIInternalError): - dec_1.update_nat_gateway_profile(None) - - mc_1 = self.models.ManagedCluster( - location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - nat_gateway_profile=self.models.nat_gateway_models.ManagedClusterNATGatewayProfile(), - ), - ) - dec_1.context.attach_mc(mc_1) - dec_mc_1 = dec_1.update_nat_gateway_profile(mc_1) - - ground_truth_mc_1 = self.models.ManagedCluster( - location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - nat_gateway_profile=self.models.nat_gateway_models.ManagedClusterNATGatewayProfile(), - ), - ) - self.assertEqual(dec_mc_1, ground_truth_mc_1) - - # custom value - dec_2 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - { - "nat_gateway_managed_outbound_ip_count": 5, - "nat_gateway_idle_timeout": None, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_2 = self.models.ManagedCluster(location="test_location") - dec_2.context.attach_mc(mc_2) - # fail on incomplete mc object (no network profile) - with self.assertRaises(UnknownError): - dec_2.update_nat_gateway_profile(mc_2) - - # custom value - dec_3 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - { - "nat_gateway_managed_outbound_ip_count": 5, - "nat_gateway_idle_timeout": 30, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_3 = self.models.ManagedCluster( - 
location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - nat_gateway_profile=self.models.nat_gateway_models.ManagedClusterNATGatewayProfile( - managed_outbound_ip_profile=self.models.nat_gateway_models.ManagedClusterManagedOutboundIPProfile( - count=10 - ), - idle_timeout_in_minutes=20, - ) - ), - ) - dec_3.context.attach_mc(mc_3) - dec_mc_3 = dec_3.update_nat_gateway_profile(mc_3) - - ground_truth_mc_3 = self.models.ManagedCluster( - location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - nat_gateway_profile=self.models.nat_gateway_models.ManagedClusterNATGatewayProfile( - managed_outbound_ip_profile=self.models.nat_gateway_models.ManagedClusterManagedOutboundIPProfile( - count=5 - ), - idle_timeout_in_minutes=30, - ) - ), - ) - self.assertEqual(dec_mc_3, ground_truth_mc_3) - - def test_update_windows_profile(self): - # default value in `aks_update` - dec_1 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - { - "enable_ahub": False, - "disable_ahub": False, - "windows_admin_password": None, - "enable_windows_gmsa": False, - "gmsa_dns_server": None, - "gmsa_root_domain_name": None, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - # fail on passing the wrong mc object - with self.assertRaises(CLIInternalError): - dec_1.update_windows_profile(None) - - mc_1 = self.models.ManagedCluster( - location="test_location", - ) - dec_1.context.attach_mc(mc_1) - dec_mc_1 = dec_1.update_windows_profile(mc_1) - ground_truth_mc_1 = self.models.ManagedCluster( - location="test_location", - ) - self.assertEqual(dec_mc_1, ground_truth_mc_1) - - # custom value - dec_2 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - { - "enable_windows_gmsa": True, - "gmsa_dns_server": "test_gmsa_dns_server", - "gmsa_root_domain_name": "test_gmsa_root_domain_name", - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - windows_profile_2 = self.models.ManagedClusterWindowsProfile( - # [SuppressMessage("Microsoft.Security", "CS002:SecretInNextLine", 
Justification="fake secrets in unit test")] - admin_username="test_win_admin_name", - admin_password="test_win_admin_password", - license_type="Windows_Server", - ) - mc_2 = self.models.ManagedCluster( - location="test_location", - windows_profile=windows_profile_2, - ) - dec_2.context.attach_mc(mc_2) - dec_mc_2 = dec_2.update_windows_profile(mc_2) - - ground_truth_gmsa_profile_2 = self.models.WindowsGmsaProfile( - enabled=True, - dns_server="test_gmsa_dns_server", - root_domain_name="test_gmsa_root_domain_name", - ) - ground_truth_windows_profile_2 = self.models.ManagedClusterWindowsProfile( - # [SuppressMessage("Microsoft.Security", "CS002:SecretInNextLine", Justification="fake secrets in unit test")] - admin_username="test_win_admin_name", - admin_password="test_win_admin_password", - license_type="Windows_Server", - gmsa_profile=ground_truth_gmsa_profile_2, - ) - ground_truth_mc_2 = self.models.ManagedCluster( - location="test_location", - windows_profile=ground_truth_windows_profile_2, - ) - self.assertEqual(dec_mc_2, ground_truth_mc_2) - - # custom value - dec_3 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - { - "enable_windows_gmsa": True, - "gmsa_dns_server": "test_gmsa_dns_server", - "gmsa_root_domain_name": "test_gmsa_root_domain_name", - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_3 = self.models.ManagedCluster( - location="test_location", - ) - dec_3.context.attach_mc(mc_3) - # fail on incomplete mc object (no windows profile) - with patch( - "azure.cli.command_modules.acs.decorator.AKSUpdateDecorator.update_windows_profile", return_value=mc_3 - ), self.assertRaises(UnknownError): - dec_3.update_windows_profile(mc_3) - - def test_update_pod_identity_profile(self): - # default value in `aks_update` - dec_1 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - { - "enable_pod_identity": False, - "disable_pod_identity": False, - "enable_pod_identity_with_kubenet": False, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - # fail on passing the wrong mc object 
- with self.assertRaises(CLIInternalError): - dec_1.update_pod_identity_profile(None) - - mc_1 = self.models.ManagedCluster( - location="test_location", - ) - dec_1.context.attach_mc(mc_1) - dec_mc_1 = dec_1.update_pod_identity_profile(mc_1) - ground_truth_mc_1 = self.models.ManagedCluster( - location="test_location", - ) - self.assertEqual(dec_mc_1, ground_truth_mc_1) - - # custom value - dec_2 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - { - "enable_pod_identity": True, - "disable_pod_identity": False, - "enable_pod_identity_with_kubenet": False, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - - mc_2 = self.models.ManagedCluster( - location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - network_plugin="kubenet", - ), - ) - dec_2.context.attach_mc(mc_2) - with self.assertRaises(CLIError): - dec_2.update_pod_identity_profile(mc_2) - - # custom value - dec_3 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - { - "enable_pod_identity": True, - "disable_pod_identity": False, - "enable_pod_identity_with_kubenet": True, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - - mc_3 = self.models.ManagedCluster( - location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - network_plugin="kubenet", - ), - identity=self.models.ManagedClusterIdentity( - type="SystemAssigned", - ), - ) - dec_3.context.attach_mc(mc_3) - dec_mc_3 = dec_3.update_pod_identity_profile(mc_3) - ground_truth_mc_3 = self.models.ManagedCluster( - location="test_location", - network_profile=self.models.ContainerServiceNetworkProfile( - network_plugin="kubenet", - ), - pod_identity_profile=self.models.pod_identity_models.ManagedClusterPodIdentityProfile( - enabled=True, - allow_network_plugin_kubenet=True, - user_assigned_identities=[], - user_assigned_identity_exceptions=[], - ), - identity=self.models.ManagedClusterIdentity( - type="SystemAssigned", - ), - ) - self.assertEqual(dec_mc_3, ground_truth_mc_3) - - # custom value - dec_4 = 
AKSPreviewUpdateDecorator( - self.cmd, - self.client, - { - "enable_pod_identity": False, - "disable_pod_identity": True, - "enable_pod_identity_with_kubenet": False, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - - mc_4 = self.models.ManagedCluster( - location="test_location", - pod_identity_profile=self.models.pod_identity_models.ManagedClusterPodIdentityProfile( - enabled=True, - user_assigned_identities=[], - user_assigned_identity_exceptions=[], - ), - ) - dec_4.context.attach_mc(mc_4) - dec_mc_4 = dec_4.update_pod_identity_profile(mc_4) - ground_truth_mc_4 = self.models.ManagedCluster( - location="test_location", - pod_identity_profile=self.models.pod_identity_models.ManagedClusterPodIdentityProfile( - enabled=False, - ), - ) - self.assertEqual(dec_mc_4, ground_truth_mc_4) - - def test_update_oidc_issuer_profile__default_value(self): - dec = AKSPreviewUpdateDecorator( - self.cmd, self.client, {}, CUSTOM_MGMT_AKS_PREVIEW - ) - mc = self.models.ManagedCluster(location="test_location") - dec.context.attach_mc(mc) - updated_mc = dec.update_oidc_issuer_profile(mc) - self.assertIsNone(updated_mc.oidc_issuer_profile) - - def test_update_oidc_issuer_profile__default_value_mc_enabled(self): - dec = AKSPreviewUpdateDecorator( - self.cmd, self.client, {}, CUSTOM_MGMT_AKS_PREVIEW - ) - mc = self.models.ManagedCluster(location="test_location") - mc.oidc_issuer_profile = self.models.ManagedClusterOIDCIssuerProfile( - enabled=True - ) - dec.context.attach_mc(mc) - updated_mc = dec.update_oidc_issuer_profile(mc) - self.assertIsNone(updated_mc.oidc_issuer_profile) - - def test_update_oidc_issuer_profile__enabled(self): - dec = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - { - "enable_oidc_issuer": True, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc = self.models.ManagedCluster(location="test_location") - dec.context.attach_mc(mc) - updated_mc = dec.update_oidc_issuer_profile(mc) - self.assertIsNotNone(updated_mc.oidc_issuer_profile) - 
self.assertTrue(updated_mc.oidc_issuer_profile.enabled) - - def test_update_oidc_issuer_profile__enabled_mc_enabled(self): - dec = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - { - "enable_oidc_issuer": True, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc = self.models.ManagedCluster(location="test_location") - mc.oidc_issuer_profile = self.models.ManagedClusterOIDCIssuerProfile( - enabled=True - ) - dec.context.attach_mc(mc) - updated_mc = dec.update_oidc_issuer_profile(mc) - self.assertIsNotNone(updated_mc.oidc_issuer_profile) - self.assertTrue(updated_mc.oidc_issuer_profile.enabled) - - def test_update_workload_identity_profile__default_value(self): - dec = AKSPreviewUpdateDecorator( - self.cmd, self.client, {}, CUSTOM_MGMT_AKS_PREVIEW - ) - mc = self.models.ManagedCluster(location="test_location") - dec.context.attach_mc(mc) - updated_mc = dec.update_workload_identity_profile(mc) - self.assertIsNone(updated_mc.security_profile) - - def test_update_workload_identity_profile__default_value_mc_enabled(self): - dec = AKSPreviewUpdateDecorator( - self.cmd, self.client, {}, CUSTOM_MGMT_AKS_PREVIEW - ) - mc = self.models.ManagedCluster(location="test_location") - mc.security_profile = self.models.ManagedClusterSecurityProfile( - workload_identity=self.models.ManagedClusterSecurityProfileWorkloadIdentity( - enabled=True, - ) - ) - dec.context.attach_mc(mc) - updated_mc = dec.update_workload_identity_profile(mc) - self.assertIsNone(updated_mc.security_profile.workload_identity) - - def test_update_workload_identity_profile__enabled(self): - dec = AKSPreviewUpdateDecorator( - self.cmd, self.client, - { - "enable_workload_identity": True, - }, - CUSTOM_MGMT_AKS_PREVIEW - ) - mc = self.models.ManagedCluster(location="test_location") - mc.oidc_issuer_profile = self.models.ManagedClusterOIDCIssuerProfile(enabled=True) - dec.context.attach_mc(mc) - updated_mc = dec.update_workload_identity_profile(mc) - 
self.assertTrue(updated_mc.security_profile.workload_identity.enabled) - - def test_update_workload_identity_profile__disabled(self): - dec = AKSPreviewUpdateDecorator( - self.cmd, self.client, - { - "enable_workload_identity": False, - }, - CUSTOM_MGMT_AKS_PREVIEW - ) - mc = self.models.ManagedCluster(location="test_location") - mc.oidc_issuer_profile = self.models.ManagedClusterOIDCIssuerProfile(enabled=True) - dec.context.attach_mc(mc) - updated_mc = dec.update_workload_identity_profile(mc) - self.assertFalse(updated_mc.security_profile.workload_identity.enabled) - - def test_update_azure_keyvault_kms(self): - dec_1 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - {}, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_1 = self.models.ManagedCluster( - location="test_location", - ) - dec_1.context.attach_mc(mc_1) - dec_mc_1 = dec_1.update_azure_keyvault_kms(mc_1) - ground_truth_mc_1 = self.models.ManagedCluster( - location="test_location", - ) - self.assertEqual(dec_mc_1, ground_truth_mc_1) - - key_id_1 = "https://fakekeyvault.vault.azure.net/secrets/fakekeyname/fakekeyversion" - dec_2 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - { - "enable_azure_keyvault_kms": True, - "azure_keyvault_kms_key_id": key_id_1, - "azure_keyvault_kms_key_vault_network_access": "Private", - "azure_keyvault_kms_key_vault_resource_id": "/subscriptions/8ecadfc9-d1a3-4ea4-b844-0d9f87e4d7c8/resourceGroups/foo/providers/Microsoft.KeyVault/vaults/foo", - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_2 = self.models.ManagedCluster( - location="test_location", - ) - dec_2.context.attach_mc(mc_2) - dec_mc_2 = dec_2.update_azure_keyvault_kms(mc_2) - - ground_truth_azure_keyvault_kms_profile_2 = self.models.AzureKeyVaultKms( - enabled=True, - key_id=key_id_1, - key_vault_network_access="Private", - key_vault_resource_id="/subscriptions/8ecadfc9-d1a3-4ea4-b844-0d9f87e4d7c8/resourceGroups/foo/providers/Microsoft.KeyVault/vaults/foo", - ) - ground_truth_security_profile_2 = 
self.models.ManagedClusterSecurityProfile( - azure_key_vault_kms=ground_truth_azure_keyvault_kms_profile_2, - ) - ground_truth_mc_2 = self.models.ManagedCluster( - location="test_location", - security_profile=ground_truth_security_profile_2, - ) - - self.assertEqual(dec_mc_2, ground_truth_mc_2) - - def test_update_api_server_access_profile(self): - dec_1 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - {}, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_1 = self.models.ManagedCluster( - location="test_location", - ) - dec_1.context.attach_mc(mc_1) - dec_mc_1 = dec_1.update_api_server_access_profile(mc_1) - ground_truth_mc_1 = self.models.ManagedCluster( - location="test_location", - ) - self.assertEqual(dec_mc_1, ground_truth_mc_1) - - apiserver_subnet_id = "/subscriptions/fakesub/resourceGroups/fakerg/providers/Microsoft.Network/virtualNetworks/fakevnet/subnets/apiserver" - dec_2 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - { - "enable_apiserver_vnet_integration": True, - "apiserver_subnet_id": apiserver_subnet_id, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_2 = self.models.ManagedCluster(location="test_location") - dec_2.context.attach_mc(mc_2) - dec_mc_2 = dec_2.update_api_server_access_profile(mc_2) - ground_truth_api_server_access_profile_2 = self.models.ManagedClusterAPIServerAccessProfile( - enable_vnet_integration=True, - subnet_id=apiserver_subnet_id, - ) - ground_truth_mc_2 = self.models.ManagedCluster( - location="test_location", - api_server_access_profile=ground_truth_api_server_access_profile_2, - ) - self.assertEqual(dec_mc_2, ground_truth_mc_2) - - def test_update_identity_profile(self): - dec_1 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - {}, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_1 = self.models.ManagedCluster( - location="test_location", - ) - dec_1.context.attach_mc(mc_1) - dec_mc_1 = dec_1.update_identity_profile(mc_1) - ground_truth_mc_1 = self.models.ManagedCluster( - location="test_location", - ) - self.assertEqual(dec_mc_1, 
ground_truth_mc_1) - - cluster_identity_obj = Mock( - client_id="test_cluster_identity_client_id", - principal_id="test_cluster_identity_object_id", - ) - with patch( - "azure.cli.command_modules.acs.decorator.AKSContext.get_identity_by_msi_client", - side_effect=[cluster_identity_obj], - ), patch( - "azext_aks_preview.decorator._ensure_cluster_identity_permission_on_kubelet_identity", - return_value=None, - ): - dec_2 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - { - "assign_kubelet_identity": "test_assign_kubelet_identity", - "yes": True, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - cluster_identity = self.models.ManagedClusterIdentity( - type="UserAssigned", - user_assigned_identities={ - "test_assign_identity": {} - }, - ) - mc_2 = self.models.ManagedCluster(location="test_location", identity=cluster_identity) - dec_2.context.attach_mc(mc_2) - dec_mc_2 = dec_2.update_identity_profile(mc_2) - - identity_profile_2 = { - "kubeletidentity": self.models.UserAssignedIdentity( - resource_id="test_assign_kubelet_identity", - ) - } - ground_truth_mc_2 = self.models.ManagedCluster( - location="test_location", - identity=cluster_identity, - identity_profile=identity_profile_2, - ) - self.assertEqual(dec_mc_2, ground_truth_mc_2) - - with patch( - "azext_aks_preview.decorator.prompt_y_n", - return_value=False, - ), self.assertRaises(DecoratorEarlyExitException): - dec_3 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - { - "assign_kubelet_identity": "test_assign_kubelet_identity", - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - cluster_identity = self.models.ManagedClusterIdentity( - type="UserAssigned", - user_assigned_identities={ - "test_assign_identity": {} - }, - ) - mc_3 = self.models.ManagedCluster(location="test_location", identity=cluster_identity) - dec_3.context.attach_mc(mc_3) - dec_mc_3 = dec_3.update_identity_profile(mc_3) - - with self.assertRaises(RequiredArgumentMissingError): - dec_4 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - { - 
"assign_kubelet_identity": "test_assign_kubelet_identity", - "yes": True, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_4 = self.models.ManagedCluster(location="test_location") - dec_4.context.attach_mc(mc_4) - dec_mc_4 = dec_4.update_identity_profile(mc_4) - - with patch( - "azure.cli.command_modules.acs.decorator.AKSContext.get_identity_by_msi_client", - side_effect=[cluster_identity_obj], - ), patch( - "azext_aks_preview.decorator._ensure_cluster_identity_permission_on_kubelet_identity", - return_value=None, - ): - dec_5 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - { - "enable_managed_identity": True, - "assign_identity": "test_assign_identity", - "assign_kubelet_identity": "test_assign_kubelet_identity", - "yes": True, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - cluster_identity = self.models.ManagedClusterIdentity( - type="UserAssigned", - user_assigned_identities={ - "test_assign_identity": {} - }, - ) - mc_5 = self.models.ManagedCluster(location="test_location", identity=cluster_identity) - dec_5.context.attach_mc(mc_5) - dec_mc_5 = dec_5.update_identity_profile(mc_5) - - identity_profile_5 = { - "kubeletidentity": self.models.UserAssignedIdentity( - resource_id="test_assign_kubelet_identity", - ) - } - ground_truth_mc_5 = self.models.ManagedCluster( - location="test_location", - identity=cluster_identity, - identity_profile=identity_profile_5, - ) - self.assertEqual(dec_mc_5, ground_truth_mc_5) - - def test_update_workload_auto_scaler_profile(self): - # Throws exception when incorrect mc object is passed. - dec = AKSPreviewUpdateDecorator(self.cmd, self.client, {}, CUSTOM_MGMT_AKS_PREVIEW) - with self.assertRaisesRegex(CLIInternalError, "^Unexpected mc object with type ''\.$"): - dec.update_workload_auto_scaler_profile(None) - - # Throws exception when the mc object passed does not match the one in context. 
- dec = AKSPreviewUpdateDecorator(self.cmd, self.client, {}, CUSTOM_MGMT_AKS_PREVIEW) - with self.assertRaisesRegex(CLIInternalError, "^Inconsistent state detected\. The incoming `mc` is not the same as the `mc` in the context\.$"): - mc_in = self.models.ManagedCluster(location="test_location") - dec.update_workload_auto_scaler_profile(mc_in) - - # Leaves profile as None without raw parameters. - dec = AKSPreviewUpdateDecorator(self.cmd, self.client, {}, CUSTOM_MGMT_AKS_PREVIEW) - mc_in = self.models.ManagedCluster(location="test_location") - dec.context.attach_mc(mc_in) - mc_out = dec.update_workload_auto_scaler_profile(mc_in) - self.assertEqual(mc_out, mc_in) - self.assertIsNone(mc_out.workload_auto_scaler_profile) - - # Leaves existing profile untouched without raw parameters. - dec = AKSPreviewUpdateDecorator(self.cmd, self.client, {}, CUSTOM_MGMT_AKS_PREVIEW) - profile = self.models.ManagedClusterWorkloadAutoScalerProfile( - keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(enabled=True)) - mc_in = self.models.ManagedCluster(location="test_location", workload_auto_scaler_profile = profile) - dec.context.attach_mc(mc_in) - mc_out = dec.update_workload_auto_scaler_profile(mc_in) - self.assertEqual(mc_out, mc_in) - self.assertEqual(mc_out.workload_auto_scaler_profile, profile) - self.assertIsNotNone(mc_out.workload_auto_scaler_profile.keda) - self.assertTrue(mc_out.workload_auto_scaler_profile.keda.enabled) - - # Enables keda when enable_keda is True. 
- dec = AKSPreviewUpdateDecorator(self.cmd, self.client, {"enable_keda": True}, CUSTOM_MGMT_AKS_PREVIEW) - mc_in = self.models.ManagedCluster(location="test_location") - dec.context.attach_mc(mc_in) - mc_out = dec.update_workload_auto_scaler_profile(mc_in) - self.assertEqual(mc_out, mc_in) - self.assertIsNotNone(mc_out.workload_auto_scaler_profile) - self.assertIsNotNone(mc_out.workload_auto_scaler_profile.keda) - self.assertTrue(mc_out.workload_auto_scaler_profile.keda.enabled) - - # Enables keda in existing profile when enable_keda is True. - dec = AKSPreviewUpdateDecorator(self.cmd, self.client, {"enable_keda": True}, CUSTOM_MGMT_AKS_PREVIEW) - profile = self.models.ManagedClusterWorkloadAutoScalerProfile( - keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(enabled=False)) - mc_in = self.models.ManagedCluster(location="test_location", workload_auto_scaler_profile = profile) - dec.context.attach_mc(mc_in) - mc_out = dec.update_workload_auto_scaler_profile(mc_in) - self.assertEqual(mc_out, mc_in) - self.assertEqual(mc_out.workload_auto_scaler_profile, profile) - self.assertIsNotNone(mc_out.workload_auto_scaler_profile.keda) - self.assertTrue(mc_out.workload_auto_scaler_profile.keda.enabled) - - # Disables keda when disable_keda is True. - dec = AKSPreviewUpdateDecorator(self.cmd, self.client, {"disable_keda": True}, CUSTOM_MGMT_AKS_PREVIEW) - mc_in = self.models.ManagedCluster(location="test_location") - dec.context.attach_mc(mc_in) - mc_out = dec.update_workload_auto_scaler_profile(mc_in) - self.assertEqual(mc_out, mc_in) - self.assertIsNotNone(mc_out.workload_auto_scaler_profile) - self.assertIsNotNone(mc_out.workload_auto_scaler_profile.keda) - self.assertFalse(mc_out.workload_auto_scaler_profile.keda.enabled) - - # Disables keda in existing profile when disable_keda is True. 
- dec = AKSPreviewUpdateDecorator(self.cmd, self.client, {"disable_keda": True}, CUSTOM_MGMT_AKS_PREVIEW) - profile = self.models.ManagedClusterWorkloadAutoScalerProfile( - keda = self.models.ManagedClusterWorkloadAutoScalerProfileKeda(enabled=True)) - mc_in = self.models.ManagedCluster(location="test_location", workload_auto_scaler_profile = profile) - dec.context.attach_mc(mc_in) - mc_out = dec.update_workload_auto_scaler_profile(mc_in) - self.assertEqual(mc_out, mc_in) - self.assertEqual(mc_out.workload_auto_scaler_profile, profile) - self.assertIsNotNone(mc_out.workload_auto_scaler_profile.keda) - self.assertFalse(mc_out.workload_auto_scaler_profile.keda.enabled) - - # Throws exception when both enable_keda and disable_keda are True. - dec = AKSPreviewUpdateDecorator(self.cmd, self.client, {"enable_keda": True, "disable_keda": True}, CUSTOM_MGMT_AKS_PREVIEW) - mc_in = self.models.ManagedCluster(location="test_location") - dec.context.attach_mc(mc_in) - with self.assertRaises(MutuallyExclusiveArgumentError): - mc_out = dec.update_workload_auto_scaler_profile(mc_in) - - def test_patch_mc(self): - # custom value - dec_1 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - {}, - CUSTOM_MGMT_AKS_PREVIEW, - ) - # fail on passing the wrong mc object - with self.assertRaises(CLIInternalError): - dec_1.patch_mc(None) - - mc_1 = self.models.ManagedCluster( - location="test_location", - pod_identity_profile=self.models.pod_identity_models.ManagedClusterPodIdentityProfile( - user_assigned_identity_exceptions=[ - self.models.pod_identity_models.ManagedClusterPodIdentityException( - name="test_name", - namespace="test_namespace", - pod_labels=None, - ) - ] - ), - ) - dec_1.context.attach_mc(mc_1) - dec_mc_1 = dec_1.patch_mc(mc_1) - - ground_truth_mc_1 = self.models.ManagedCluster( - location="test_location", - pod_identity_profile=self.models.pod_identity_models.ManagedClusterPodIdentityProfile( - user_assigned_identity_exceptions=[ - 
self.models.pod_identity_models.ManagedClusterPodIdentityException( - name="test_name", - namespace="test_namespace", - pod_labels={}, - ) - ] - ), - ) - self.assertEqual(dec_mc_1, ground_truth_mc_1) - - def test_update_mc_preview_profile(self): - import inspect - - from azext_aks_preview.custom import aks_update - from azure.cli.command_modules.acs.decorator import AKSParamDict - - optional_params = {} - positional_params = [] - for _, v in inspect.signature(aks_update).parameters.items(): - if v.default != v.empty: - optional_params[v.name] = v.default - else: - positional_params.append(v.name) - ground_truth_positional_params = [ - "cmd", - "client", - "resource_group_name", - "name", - ] - self.assertEqual(positional_params, ground_truth_positional_params) - - # prepare a dictionary of default parameters - raw_param_dict = { - "resource_group_name": "test_rg_name", - "name": "test_name", - } - raw_param_dict.update(optional_params) - raw_param_dict = AKSParamDict(raw_param_dict) - - # default value in `update` - dec_1 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - raw_param_dict, - CUSTOM_MGMT_AKS_PREVIEW, - ) - - mock_profile = Mock( - get_subscription_id=Mock(return_value="1234-5678-9012") - ) - mock_existing_mc = self.models.ManagedCluster( - location="test_location", - agent_pool_profiles=[ - self.models.ManagedClusterAgentPoolProfile( - name="nodepool1", - ) - ], - network_profile=self.models.ContainerServiceNetworkProfile( - load_balancer_sku="standard", - ), - identity=self.models.ManagedClusterIdentity(type="SystemAssigned"), - identity_profile={ - "kubeletidentity": self.models.UserAssignedIdentity( - resource_id="test_resource_id", - client_id="test_client_id", - object_id="test_object_id", - ) - }, - ) - with patch( - "azure.cli.command_modules.acs.decorator.get_rg_location", - return_value="test_location", - ), patch( - "azure.cli.command_modules.acs.decorator.Profile", - return_value=mock_profile, - ), patch( - 
"azext_aks_preview.decorator.AKSPreviewUpdateDecorator.check_raw_parameters", - return_value=True, - ), patch.object( - self.client, "get", return_value=mock_existing_mc - ): - dec_mc_1 = dec_1.update_mc_preview_profile() - - ground_truth_agent_pool_profile_1 = ( - self.models.ManagedClusterAgentPoolProfile( - name="nodepool1", - ) - ) - ground_truth_network_profile_1 = ( - self.models.ContainerServiceNetworkProfile( - load_balancer_sku="standard", - ) - ) - ground_truth_identity_1 = self.models.ManagedClusterIdentity( - type="SystemAssigned" - ) - ground_truth_identity_profile_1 = { - "kubeletidentity": self.models.UserAssignedIdentity( - resource_id="test_resource_id", - client_id="test_client_id", - object_id="test_object_id", - ) - } - ground_truth_storage_profile_1=self.models.ManagedClusterStorageProfile( - disk_csi_driver = None, - file_csi_driver = None, - snapshot_controller = None - ) - ground_truth_mc_1 = self.models.ManagedCluster( - location="test_location", - agent_pool_profiles=[ground_truth_agent_pool_profile_1], - network_profile=ground_truth_network_profile_1, - identity=ground_truth_identity_1, - identity_profile=ground_truth_identity_profile_1, - storage_profile=ground_truth_storage_profile_1, - ) - raw_param_dict.print_usage_statistics() - self.assertEqual(dec_mc_1, ground_truth_mc_1) - - def test_update_mc_preview(self): - dec_1 = AKSPreviewUpdateDecorator( - self.cmd, - self.client, - { - "resource_group_name": "test_rg_name", - "name": "test_name", - "no_wait": False, - }, - CUSTOM_MGMT_AKS_PREVIEW, - ) - mc_1 = self.models.ManagedCluster( - location="test_location", - agent_pool_profiles=[ - self.models.ManagedClusterAgentPoolProfile( - name="nodepool1", - ) - ], - network_profile=self.models.ContainerServiceNetworkProfile( - load_balancer_sku="standard", - ), - identity=self.models.ManagedClusterIdentity(type="SystemAssigned"), - identity_profile={ - "kubeletidentity": self.models.UserAssignedIdentity( - resource_id="test_resource_id", - 
client_id="test_client_id", - object_id="test_object_id", - ) - }, - ) - dec_1.context.attach_mc(mc_1) - # fail on passing the wrong mc object - with self.assertRaises(CLIInternalError): - dec_1.update_mc_preview(None) - mock_profile = Mock( - get_subscription_id=Mock(return_value="test_subscription_id") - ) - with patch( - "azure.cli.command_modules.acs.decorator.Profile", - return_value=mock_profile, - ), patch( - "azure.cli.command_modules.acs.decorator._put_managed_cluster_ensuring_permission" - ) as put_mc: - dec_1.update_mc_preview(mc_1) - put_mc.assert_called_with( - self.cmd, - self.client, - "test_subscription_id", - "test_rg_name", - "test_name", - mc_1, - False, - False, - False, - False, - None, - True, - None, - {}, - False, - ) - - -if __name__ == "__main__": - unittest.main() diff --git a/src/aks-preview/azext_aks_preview/tests/latest/test_loadbalancer.py b/src/aks-preview/azext_aks_preview/tests/latest/test_loadbalancer.py index b95b656626e..17479b3b343 100644 --- a/src/aks-preview/azext_aks_preview/tests/latest/test_loadbalancer.py +++ b/src/aks-preview/azext_aks_preview/tests/latest/test_loadbalancer.py @@ -7,7 +7,9 @@ from azext_aks_preview import _loadbalancer as loadbalancer from azext_aks_preview.__init__ import register_aks_preview_resource_type from azext_aks_preview._client_factory import CUSTOM_MGMT_AKS_PREVIEW -from azext_aks_preview.decorator import AKSPreviewModels +from azext_aks_preview.managed_cluster_decorator import ( + AKSPreviewManagedClusterModels, +) from azext_aks_preview.tests.latest.mocks import MockCLI, MockCmd @@ -18,7 +20,9 @@ def setUp(self): self.cli_ctx = MockCLI() self.cmd = MockCmd(self.cli_ctx) # store all the models used by nat gateway - self.lb_models = AKSPreviewModels(self.cmd, CUSTOM_MGMT_AKS_PREVIEW).lb_models + self.load_balancer_models = AKSPreviewManagedClusterModels( + self.cmd, CUSTOM_MGMT_AKS_PREVIEW + ).load_balancer_models def test_configure_load_balancer_profile(self): managed_outbound_ip_count = 5 @@ 
-28,18 +32,18 @@ def test_configure_load_balancer_profile(self): outbound_ports = 80 idle_timeout = 3600 - # store all the models used by load balancer - ManagedClusterLoadBalancerProfile = self.lb_models.get( - "ManagedClusterLoadBalancerProfile" + # store all the models used by load balancer + ManagedClusterLoadBalancerProfile = ( + self.load_balancer_models.ManagedClusterLoadBalancerProfile ) - ManagedClusterLoadBalancerProfileManagedOutboundIPs = self.lb_models.get( - "ManagedClusterLoadBalancerProfileManagedOutboundIPs" + ManagedClusterLoadBalancerProfileManagedOutboundIPs = ( + self.load_balancer_models.ManagedClusterLoadBalancerProfileManagedOutboundIPs ) - ManagedClusterLoadBalancerProfileOutboundIPs = self.lb_models.get( - "ManagedClusterLoadBalancerProfileOutboundIPs" + ManagedClusterLoadBalancerProfileOutboundIPs = ( + self.load_balancer_models.ManagedClusterLoadBalancerProfileOutboundIPs ) - ManagedClusterLoadBalancerProfileOutboundIPPrefixes = self.lb_models.get( - "ManagedClusterLoadBalancerProfileOutboundIPPrefixes" + ManagedClusterLoadBalancerProfileOutboundIPPrefixes = ( + self.load_balancer_models.ManagedClusterLoadBalancerProfileOutboundIPPrefixes ) profile = ManagedClusterLoadBalancerProfile() @@ -63,7 +67,7 @@ def test_configure_load_balancer_profile(self): outbound_ports, idle_timeout, profile, - self.lb_models, + self.load_balancer_models, ) self.assertEqual(p.managed_outbound_i_ps.count, 5) diff --git a/src/aks-preview/azext_aks_preview/tests/latest/test_natgateway.py b/src/aks-preview/azext_aks_preview/tests/latest/test_natgateway.py index c995457f800..f023922517c 100644 --- a/src/aks-preview/azext_aks_preview/tests/latest/test_natgateway.py +++ b/src/aks-preview/azext_aks_preview/tests/latest/test_natgateway.py @@ -7,7 +7,9 @@ import azext_aks_preview._natgateway as natgateway from azext_aks_preview.__init__ import register_aks_preview_resource_type from azext_aks_preview._client_factory import CUSTOM_MGMT_AKS_PREVIEW -from 
azext_aks_preview.decorator import AKSPreviewModels +from azext_aks_preview.managed_cluster_decorator import ( + AKSPreviewManagedClusterModels, +) from azext_aks_preview.tests.latest.mocks import MockCLI, MockCmd @@ -18,7 +20,7 @@ def setUp(self): self.cli_ctx = MockCLI() self.cmd = MockCmd(self.cli_ctx) # store all the models used by nat gateway - self.nat_gateway_models = AKSPreviewModels(self.cmd, CUSTOM_MGMT_AKS_PREVIEW).nat_gateway_models + self.nat_gateway_models = AKSPreviewManagedClusterModels(self.cmd, CUSTOM_MGMT_AKS_PREVIEW).nat_gateway_models def test_empty_arguments(self): profile = natgateway.create_nat_gateway_profile(None, None, models=self.nat_gateway_models) @@ -41,7 +43,7 @@ def setUp(self): self.cli_ctx = MockCLI() self.cmd = MockCmd(self.cli_ctx) # store all the models used by nat gateway - self.nat_gateway_models = AKSPreviewModels(self.cmd, CUSTOM_MGMT_AKS_PREVIEW).nat_gateway_models + self.nat_gateway_models = AKSPreviewManagedClusterModels(self.cmd, CUSTOM_MGMT_AKS_PREVIEW).nat_gateway_models def test_empty_arguments(self): origin_profile = self.nat_gateway_models.ManagedClusterNATGatewayProfile( @@ -88,3 +90,7 @@ def test_nonempty_idle_timeout(self): def test_nonempty_arguments(self): result = natgateway.is_nat_gateway_profile_provided(1, 4) self.assertTrue(result) + + +if __name__ == '__main__': + unittest.main() diff --git a/src/aks-preview/azext_aks_preview/tests/latest/test_pod_identity_helpers.py b/src/aks-preview/azext_aks_preview/tests/latest/test_pod_identity_helpers.py index 172e64573c1..9b2845974ef 100644 --- a/src/aks-preview/azext_aks_preview/tests/latest/test_pod_identity_helpers.py +++ b/src/aks-preview/azext_aks_preview/tests/latest/test_pod_identity_helpers.py @@ -10,7 +10,9 @@ _fill_defaults_for_pod_identity_exceptions, _fill_defaults_for_pod_identity_profile, ) -from azext_aks_preview.decorator import AKSPreviewModels +from azext_aks_preview.managed_cluster_decorator import ( + AKSPreviewManagedClusterModels, +) from 
azext_aks_preview.tests.latest.mocks import MockCLI, MockCmd @@ -21,7 +23,7 @@ def setUp(self): self.cli_ctx = MockCLI() self.cmd = MockCmd(self.cli_ctx) # store all the models used by nat gateway - self.pod_identity_models = AKSPreviewModels(self.cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models + self.pod_identity_models = AKSPreviewManagedClusterModels(self.cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models def test_fill_defaults_for_pod_identity_exceptions(self): # get models @@ -107,3 +109,7 @@ def test_fill_defaults_for_pod_identity_profile(self): self.assertEqual(excs[0].name, "test-exc-1") self.assertEqual(excs[1].pod_labels, {}) self.assertEqual(excs[1].name, "test-exc-2") + + +if __name__ == '__main__': + unittest.main() diff --git a/src/aks-preview/azext_aks_preview/tests/latest/test_validators.py b/src/aks-preview/azext_aks_preview/tests/latest/test_validators.py index c4683f5254c..d1e7a2a0d0e 100644 --- a/src/aks-preview/azext_aks_preview/tests/latest/test_validators.py +++ b/src/aks-preview/azext_aks_preview/tests/latest/test_validators.py @@ -384,11 +384,13 @@ def test_invalid_azure_keyvault_kms_key_id_with_wrong_object_type(self): validators.validate_azure_keyvault_kms_key_id(namespace) self.assertEqual(str(cm.exception), err) + class AzureKeyVaultKmsKeyVaultResourceIdNamespace: def __init__(self, azure_keyvault_kms_key_vault_resource_id): self.azure_keyvault_kms_key_vault_resource_id = azure_keyvault_kms_key_vault_resource_id + class TestValidateAzureKeyVaultKmsKeyVaultResourceId(unittest.TestCase): def test_invalid_azure_keyvault_kms_key_vault_resource_id(self): invalid_azure_keyvault_kms_key_vault_resource_id = "invalid" @@ -405,5 +407,6 @@ def test_valid_azure_keyvault_kms_key_vault_resource_id(self): validators.validate_azure_keyvault_kms_key_vault_resource_id(namespace) + if __name__ == "__main__": unittest.main() diff --git a/src/aks-preview/azext_aks_preview/tests/latest/utils.py b/src/aks-preview/azext_aks_preview/tests/latest/utils.py index 
5871a469b84..69cda720ee8 100644 --- a/src/aks-preview/azext_aks_preview/tests/latest/utils.py +++ b/src/aks-preview/azext_aks_preview/tests/latest/utils.py @@ -4,9 +4,6 @@ # -------------------------------------------------------------------------------------------- import os -import platform -import shutil -import tempfile def get_test_data_file_path(filename):