From f2aa72a8298d52b272ac421195df126d282d441a Mon Sep 17 00:00:00 2001 From: Wei Shi <19400755+TheOnlyWei@users.noreply.github.com> Date: Thu, 3 Aug 2023 02:06:28 -0700 Subject: [PATCH] update connectedk8s for ARM metadata 2022-09-01 (#6328) --- src/connectedk8s/HISTORY.rst | 39 +---- src/connectedk8s/azext_connectedk8s/_utils.py | 75 +++++++-- src/connectedk8s/azext_connectedk8s/custom.py | 142 ++++++++---------- src/connectedk8s/setup.py | 2 +- 4 files changed, 124 insertions(+), 134 deletions(-) diff --git a/src/connectedk8s/HISTORY.rst b/src/connectedk8s/HISTORY.rst index c45c21a06bc..407112b843e 100644 --- a/src/connectedk8s/HISTORY.rst +++ b/src/connectedk8s/HISTORY.rst @@ -1,6 +1,11 @@ .. :changelog: Release History +=============== + +1.4.0 +++++++ +* Added support for reading ARM metadata 2022-09-01. 1.3.20 ++++++ @@ -9,19 +14,16 @@ Release History 1.3.19 ++++++ - * Adding outbound network connectivity check for Cluster Connect (OBO endpoint) 1.3.18 ++++++ - * Cleaning up stale CRDs if present during onboarding (even in absence of azure-arc release) * Adding retries in Helm client download * Added some failures to be classified as userfaults 1.3.17 ++++++ - * Added a spinner which runs while ARM resource is being provisioned * Added additional logging to indicate which step is running @@ -34,26 +36,22 @@ Release History 1.3.15 ++++++ - * Diagnoser Enhancements - storing metadata and KAP CR snapshots , azure-arc helm values , azure-arc ns secret list * Removing circular imports of 1. 
custom from precheckutils and 2.(precheckutils and troubleshootutils) from utils * Adding back heuristics detection in connect command 1.3.14 ++++++ - * Changing telemetry push interval to 1 hr * Adding two new supported infra values - Windows 10 IoT Enterprise, LTSCWindows 10 Enterprise LTSC * Saving cluster diagnostic checks pod and job logs 1.3.13 ++++++ - * Bumping up the cluster diagnostic checks helm chart version - Nodeselector addition 1.3.12 ++++++ - * Added retries for helm chart pull and config DP POST call * Fix parameterizing for kid in csp method * Bug fix in delete_arc_agents for arm64 parameter @@ -61,7 +59,6 @@ Release History 1.3.11 ++++++ - * Added support for custom AAD token * Removed ARM64 unsupported warning * Increased helm delete timeout for ARM64 clusters @@ -69,121 +66,98 @@ Release History 1.3.10 ++++++ - * Added CLI heuristics change * Added AKS IOT infra support * Bug Fix in precheckutils 1.3.9 ++++++ - * Added DNS and outbound connectivity prechecks in connect command 1.3.8 ++++++ - * Added connectedk8s proxy support for fairfax 1.3.7 ++++++ - * Install new helm release in azure-arc-release NS 1.3.6 ++++++ - * Updated patch behaviour for Azure Hybrid Benefit property 1.3.5 ++++++ - * Added software assurance related changes for AKS HCI * Added parameter for overriding container log path * Updated kubernetes package dependency to 24.2.0 1.3.4 ++++++ - * Fixed a proxy related bug in connectedk8s upgrade 1.3.3 ++++++ - * Added a timeout in force delete's CRD deletion command 1.3.2 ++++++ - * Added force delete command which is an added functionality in connectedk8s delete function 1.3.1 ++++++ - * Updated min cli core version to 2.30.0 1.3.0 ++++++ - * Added private link support 1.2.11 ++++++ - * Increased the timeout of diagnoser job completion to 180 seconds 1.2.10 ++++++ - * Added troubleshoot command which can be used to diagnose Arc enabled K8s clusters 1.2.9 ++++++ - * Add correlation-id parameter to internally track onboarding 
sources 1.2.8 ++++++ - * Bump up CSP version to 1.3.019103, bump up `pycryptodome` to 3.14.1 to support Python 3.10 1.2.7 ++++++ - * Avoid using packaging module and revert minCliCoreVersion to 2.16.0 1.2.6 ++++++ - * Update minCliCoreVersion to 2.23.0 1.2.5 ++++++ - * Using MSAL based auth for CLI version >= 2.30.0 1.2.4 ++++++ - * Custom cert changes, using "userValues.txt" for existing values in update command instead of --reuse-values, fix to wait for LRO to complete before starting agent installation/deletion 1.2.3 ++++++ - * Fetching the tenantID from subscription object instead of graphclient 1.2.2 ++++++ - * Updated connectedk8s proxy to support mooncake 1.2.1 ++++++ - * Add maxCliCoreVersion as 2.29.0 1.2.0 ++++++ - * Updated CSP version to 1.3.017131 * Updated GA SDK to 2021-10-01 * Updated CSP endpoint to CDN @@ -191,17 +165,14 @@ Release History 1.1.11 ++++++ - * Installing helm binary as part of CLI commands 1.1.10 ++++++ - * Fixed ARM exception telemetry 1.1.9 ++++++ - * Increase onboarding and upgrade timeout 1.1.8 diff --git a/src/connectedk8s/azext_connectedk8s/_utils.py b/src/connectedk8s/azext_connectedk8s/_utils.py index 26b90fc1e59..8da3ef7971e 100644 --- a/src/connectedk8s/azext_connectedk8s/_utils.py +++ b/src/connectedk8s/azext_connectedk8s/_utils.py @@ -3,6 +3,7 @@ # Licensed under the MIT License. See License.txt in the project root for license information. 
# -------------------------------------------------------------------------------------------- +import sys import os import shutil import subprocess @@ -377,21 +378,21 @@ def add_helm_repo(kube_config, kube_context, helm_client_location): raise CLIInternalError("Unable to add repository {} to helm: ".format(repo_url) + error_helm_repo.decode("ascii")) -def get_helm_registry(cmd, config_dp_endpoint, dp_endpoint_dogfood=None, release_train_dogfood=None): +def get_helm_registry(cmd, config_dp_endpoint, release_train_custom=None): # Setting uri - get_chart_location_url = "{}/{}/GetLatestHelmPackagePath?api-version=2019-11-01-preview".format(config_dp_endpoint, 'azure-arc-k8sagents') + api_version = "2019-11-01-preview" + chart_location_url_segment = "azure-arc-k8sagents/GetLatestHelmPackagePath?api-version={}".format(api_version) release_train = os.getenv('RELEASETRAIN') if os.getenv('RELEASETRAIN') else 'stable' - if dp_endpoint_dogfood: - get_chart_location_url = "{}/azure-arc-k8sagents/GetLatestHelmPackagePath?api-version=2019-11-01-preview".format(dp_endpoint_dogfood) - if release_train_dogfood: - release_train = release_train_dogfood + chart_location_url = "{}/{}".format(config_dp_endpoint, chart_location_url_segment) + if release_train_custom: + release_train = release_train_custom uri_parameters = ["releaseTrain={}".format(release_train)] resource = cmd.cli_ctx.cloud.endpoints.active_directory_resource_id headers = None if os.getenv('AZURE_ACCESS_TOKEN'): headers = ["Authorization=Bearer {}".format(os.getenv('AZURE_ACCESS_TOKEN'))] # Sending request with retries - r = send_request_with_retries(cmd.cli_ctx, 'post', get_chart_location_url, headers=headers, fault_type=consts.Get_HelmRegistery_Path_Fault_Type, summary='Error while fetching helm chart registry path', uri_parameters=uri_parameters, resource=resource) + r = send_request_with_retries(cmd.cli_ctx, 'post', chart_location_url, headers=headers, fault_type=consts.Get_HelmRegistery_Path_Fault_Type, 
summary='Error while fetching helm chart registry path', uri_parameters=uri_parameters, resource=resource) if r.content: try: return r.json().get('repositoryPath') @@ -490,18 +491,16 @@ def validate_infrastructure_type(infra): def get_values_file(): - values_file_provided = False values_file = os.getenv('HELMVALUESPATH') if (values_file is not None) and (os.path.isfile(values_file)): - values_file_provided = True logger.warning("Values files detected. Reading additional helm parameters from same.") # trimming required for windows os if (values_file.startswith("'") or values_file.startswith('"')): values_file = values_file[1:] if (values_file.endswith("'") or values_file.endswith('"')): values_file = values_file[:-1] - - return values_file_provided, values_file + return values_file + return None def ensure_namespace_cleanup(): @@ -568,11 +567,12 @@ def cleanup_release_install_namespace_if_exists(): # DO NOT use this method for re-put scenarios. This method involves new NS creation for helm release. 
For re-put scenarios, brownfield scenario needs to be handled where helm release still stays in default NS -def helm_install_release(chart_path, subscription_id, kubernetes_distro, kubernetes_infra, resource_group_name, cluster_name, - location, onboarding_tenant_id, http_proxy, https_proxy, no_proxy, proxy_cert, private_key_pem, - kube_config, kube_context, no_wait, values_file_provided, values_file, cloud_name, disable_auto_upgrade, - enable_custom_locations, custom_locations_oid, helm_client_location, enable_private_link, onboarding_timeout="600", +def helm_install_release(resource_manager, chart_path, subscription_id, kubernetes_distro, kubernetes_infra, resource_group_name, + cluster_name, location, onboarding_tenant_id, http_proxy, https_proxy, no_proxy, proxy_cert, private_key_pem, + kube_config, kube_context, no_wait, values_file, cloud_name, disable_auto_upgrade, enable_custom_locations, + custom_locations_oid, helm_client_location, enable_private_link, arm_metadata, onboarding_timeout="600", container_log_path=None): + cmd_helm_install = [helm_client_location, "upgrade", "--install", "azure-arc", chart_path, "--set", "global.subscriptionId={}".format(subscription_id), "--set", "global.kubernetesDistro={}".format(kubernetes_distro), @@ -588,6 +588,28 @@ def helm_install_release(chart_path, subscription_id, kubernetes_distro, kuberne "--namespace", "{}".format(consts.Release_Install_Namespace), "--create-namespace", "--output", "json"] + + # Special configurations from 2022-09-01 ARM metadata. 
+ if "dataplaneEndpoints" in arm_metadata: + notification_endpoint = arm_metadata["dataplaneEndpoints"]["arcGlobalNotificationServiceEndpoint"] + config_endpoint = arm_metadata["dataplaneEndpoints"]["arcConfigEndpoint"] + his_endpoint = arm_metadata["dataplaneEndpoints"]["arcHybridIdentityServiceEndpoint"] + if his_endpoint[-1] != "/": + his_endpoint = his_endpoint + "/" + his_endpoint = his_endpoint + f"discovery?location={location}&api-version=1.0-preview" + relay_endpoint = arm_metadata["suffixes"]["relayEndpointSuffix"] + active_directory = arm_metadata["authentication"]["loginEndpoint"] + cmd_helm_install.extend( + [ + "--set", "systemDefaultValues.azureResourceManagerEndpoint={}".format(resource_manager), + "--set", "systemDefaultValues.azureArcAgents.config_dp_endpoint_override={}".format(config_endpoint), + "--set", "systemDefaultValues.clusterconnect-agent.notification_dp_endpoint_override={}".format(notification_endpoint), + "--set", "systemDefaultValues.clusterconnect-agent.relay_endpoint_suffix_override={}".format(relay_endpoint), + "--set", "systemDefaultValues.clusteridentityoperator.his_endpoint_override={}".format(his_endpoint), + "--set", "systemDefaultValues.activeDirectoryEndpoint={}".format(active_directory) + ] + ) + # Add custom-locations related params if enable_custom_locations and not enable_private_link: cmd_helm_install.extend(["--set", "systemDefaultValues.customLocations.enabled=true"]) @@ -596,7 +618,7 @@ def helm_install_release(chart_path, subscription_id, kubernetes_distro, kuberne if enable_private_link is True: cmd_helm_install.extend(["--set", "systemDefaultValues.clusterconnect-agent.enabled=false"]) # To set some other helm parameters through file - if values_file_provided: + if values_file: cmd_helm_install.extend(["-f", values_file]) if disable_auto_upgrade: cmd_helm_install.extend(["--set", "systemDefaultValues.azureArcAgents.autoUpdate={}".format("false")]) @@ -795,3 +817,24 @@ def is_cli_using_msal_auth(): continue return 
i > j return len(v1.split(".")) == len(v2.split(".")) + + +def get_metadata(arm_endpoint, api_version="2022-09-01"): + metadata_url_suffix = f"/metadata/endpoints?api-version={api_version}" + metadata_endpoint = None + try: + import requests + session = requests.Session() + metadata_endpoint = arm_endpoint + metadata_url_suffix + print(f"Retrieving ARM metadata from: {metadata_endpoint}") + response = session.get(metadata_endpoint) + if response.status_code == 200: + return response.json() + else: + msg = f"ARM metadata endpoint '{metadata_endpoint}' returned status code {response.status_code}." + raise HttpResponseError(msg) + except Exception as err: + msg = f"Failed to request ARM metadata {metadata_endpoint}." + print(msg, file=sys.stderr) + print(f"Please ensure you have network connection. Error: {str(err)}", file=sys.stderr) + arm_exception_handler(err, msg) diff --git a/src/connectedk8s/azext_connectedk8s/custom.py b/src/connectedk8s/azext_connectedk8s/custom.py index e45e74f23a1..5b2272e4e18 100644 --- a/src/connectedk8s/azext_connectedk8s/custom.py +++ b/src/connectedk8s/azext_connectedk8s/custom.py @@ -134,14 +134,12 @@ def create_connectedk8s(cmd, client, resource_group_name, cluster_name, correlat client = cf_connected_cluster_prev_2022_10_01(cmd.cli_ctx, None) # Checking whether optional extra values file has been provided. - values_file_provided, values_file = utils.get_values_file() - - # Validate the helm environment file for Dogfood. 
- dp_endpoint_dogfood = None - release_train_dogfood = None + values_file = utils.get_values_file() if cmd.cli_ctx.cloud.endpoints.resource_manager == consts.Dogfood_RMEndpoint: azure_cloud = consts.Azure_DogfoodCloudName - dp_endpoint_dogfood, release_train_dogfood = validate_env_file_dogfood(values_file, values_file_provided) + + arm_metadata = utils.get_metadata(cmd.cli_ctx.cloud.endpoints.resource_manager) + config_dp_endpoint, release_train = get_config_dp_endpoint(cmd, location, values_file, arm_metadata) # Loading the kubeconfig file in kubernetes client configuration load_kube_config(kube_config, kube_context) @@ -308,7 +306,7 @@ def create_connectedk8s(cmd, client, resource_group_name, cluster_name, correlat cc_response = LongRunningOperation(cmd.cli_ctx)(cc_response) # Disabling cluster-connect if private link is getting enabled if enable_private_link is True: - disable_cluster_connect(cmd, client, resource_group_name, cluster_name, kube_config, kube_context, values_file, values_file_provided, dp_endpoint_dogfood, release_train_dogfood, release_namespace, helm_client_location) + disable_cluster_connect(cmd, client, resource_group_name, cluster_name, kube_config, kube_context, values_file, release_namespace, helm_client_location) return cc_response else: telemetry.set_exception(exception='The kubernetes cluster is already onboarded', fault_type=consts.Cluster_Already_Onboarded_Fault_Type, @@ -349,11 +347,9 @@ def create_connectedk8s(cmd, client, resource_group_name, cluster_name, correlat if os.getenv('HELMREPONAME') and os.getenv('HELMREPOURL'): utils.add_helm_repo(kube_config, kube_context, helm_client_location) - # Setting the config dataplane endpoint - config_dp_endpoint = get_config_dp_endpoint(cmd, location) - # Retrieving Helm chart OCI Artifact location - registry_path = os.getenv('HELMREGISTRY') if os.getenv('HELMREGISTRY') else utils.get_helm_registry(cmd, config_dp_endpoint, dp_endpoint_dogfood, release_train_dogfood) + registry_path = 
os.getenv('HELMREGISTRY') if os.getenv('HELMREGISTRY') else utils.get_helm_registry(cmd, config_dp_endpoint, release_train) + # Get azure-arc agent version for telemetry azure_arc_agent_version = registry_path.split(':')[1] telemetry.add_extension_event('connectedk8s', {'Context.Default.AzureCLI.AgentVersion': azure_arc_agent_version}) @@ -394,11 +390,10 @@ def create_connectedk8s(cmd, client, resource_group_name, cluster_name, correlat print("Starting to install Azure arc agents on the Kubernetes cluster.") # Install azure-arc agents - utils.helm_install_release(chart_path, subscription_id, kubernetes_distro, kubernetes_infra, resource_group_name, cluster_name, + utils.helm_install_release(cmd.cli_ctx.cloud.endpoints.resource_manager, chart_path, subscription_id, kubernetes_distro, kubernetes_infra, resource_group_name, cluster_name, location, onboarding_tenant_id, http_proxy, https_proxy, no_proxy, proxy_cert, private_key_pem, kube_config, - kube_context, no_wait, values_file_provided, values_file, azure_cloud, disable_auto_upgrade, enable_custom_locations, - custom_locations_oid, helm_client_location, enable_private_link, onboarding_timeout, container_log_path) - + kube_context, no_wait, values_file, azure_cloud, disable_auto_upgrade, enable_custom_locations, + custom_locations_oid, helm_client_location, enable_private_link, arm_metadata, onboarding_timeout, container_log_path) return put_cc_response @@ -413,8 +408,8 @@ def send_cloud_telemetry(cmd): return cloud_name -def validate_env_file_dogfood(values_file, values_file_provided): - if not values_file_provided: +def validate_env_file_dogfood(values_file): + if not values_file: telemetry.set_exception(exception='Helm environment file not provided', fault_type=consts.Helm_Environment_File_Fault_Type, summary='Helm environment file missing') raise ValidationError("Helm environment file is required when using Dogfood environment for onboarding the cluster.", recommendation="Please set the environment variable 
'HELMVALUESPATH' to point to the file.") @@ -570,12 +565,29 @@ def connected_cluster_exists(client, resource_group_name, cluster_name): return True -def get_config_dp_endpoint(cmd, location): +def get_default_config_dp_endpoint(cmd, location): cloud_based_domain = cmd.cli_ctx.cloud.endpoints.active_directory.split('.')[2] config_dp_endpoint = "https://{}.dp.kubernetesconfiguration.azure.{}".format(location, cloud_based_domain) return config_dp_endpoint +def get_config_dp_endpoint(cmd, location, values_file, arm_metadata=None): + release_train = None + config_dp_endpoint = None + if arm_metadata is None: + arm_metadata = utils.get_metadata(cmd.cli_ctx.cloud.endpoints.resource_manager) + # Read and validate the helm values file for Dogfood. + if cmd.cli_ctx.cloud.endpoints.resource_manager == consts.Dogfood_RMEndpoint: + config_dp_endpoint, release_train = validate_env_file_dogfood(values_file) + # Get the values or endpoints required for retrieving the Helm registry URL. + if "dataplaneEndpoints" in arm_metadata: + config_dp_endpoint = arm_metadata["dataplaneEndpoints"]["arcConfigEndpoint"] + # Get the default config dataplane endpoint. + if config_dp_endpoint is None: + config_dp_endpoint = get_default_config_dp_endpoint(cmd, location) + return config_dp_endpoint, release_train + + + def get_public_key(key_pair): pubKey = key_pair.publickey() seq = asn1.DerSequence([pubKey.n, pubKey.e]) @@ -984,13 +996,7 @@ def update_connected_cluster(cmd, client, resource_group_name, cluster_name, htt raise MutuallyExclusiveArgumentError(consts.EnableProxy_Conflict_Error) # Checking whether optional extra values file has been provided. - values_file_provided, values_file = utils.get_values_file() - - # Validate the helm environment file for Dogfood. 
- dp_endpoint_dogfood = None - release_train_dogfood = None - if cmd.cli_ctx.cloud.endpoints.resource_manager == consts.Dogfood_RMEndpoint: - dp_endpoint_dogfood, release_train_dogfood = validate_env_file_dogfood(values_file, values_file_provided) + values_file = utils.get_values_file() # Loading the kubeconfig file in kubernetes client configuration load_kube_config(kube_config, kube_context) @@ -1026,11 +1032,10 @@ def update_connected_cluster(cmd, client, resource_group_name, cluster_name, htt if os.getenv('HELMREPONAME') and os.getenv('HELMREPOURL'): utils.add_helm_repo(kube_config, kube_context, helm_client_location) - # Setting the config dataplane endpoint - config_dp_endpoint = get_config_dp_endpoint(cmd, connected_cluster.location) + config_dp_endpoint, release_train = get_config_dp_endpoint(cmd, connected_cluster.location, values_file) # Retrieving Helm chart OCI Artifact location - registry_path = os.getenv('HELMREGISTRY') if os.getenv('HELMREGISTRY') else utils.get_helm_registry(cmd, config_dp_endpoint, dp_endpoint_dogfood, release_train_dogfood) + registry_path = os.getenv('HELMREGISTRY') if os.getenv('HELMREGISTRY') else utils.get_helm_registry(cmd, config_dp_endpoint, release_train) reg_path_array = registry_path.split(':') agent_version = reg_path_array[1] @@ -1063,9 +1068,8 @@ def update_connected_cluster(cmd, client, resource_group_name, cluster_name, htt raise CLIInternalError(str.format(consts.Update_Agent_Failure, error_helm_get_values.decode("ascii"))) cmd_helm_upgrade = [helm_client_location, "upgrade", "azure-arc", chart_path, "--namespace", release_namespace, - "-f", - user_values_location, "--wait", "--output", "json"] - if values_file_provided: + "-f", user_values_location, "--wait", "--output", "json"] + if values_file: cmd_helm_upgrade.extend(["-f", values_file]) if auto_upgrade is not None: cmd_helm_upgrade.extend(["--set", "systemDefaultValues.azureArcAgents.autoUpdate={}".format(auto_upgrade)]) @@ -1120,13 +1124,7 @@ def 
upgrade_agents(cmd, client, resource_group_name, cluster_name, kube_config=N kube_config = set_kube_config(kube_config) # Checking whether optional extra values file has been provided. - values_file_provided, values_file = utils.get_values_file() - - # Validate the helm environment file for Dogfood. - dp_endpoint_dogfood = None - release_train_dogfood = None - if cmd.cli_ctx.cloud.endpoints.resource_manager == consts.Dogfood_RMEndpoint: - dp_endpoint_dogfood, release_train_dogfood = validate_env_file_dogfood(values_file, values_file_provided) + values_file = utils.get_values_file() # Loading the kubeconfig file in kubernetes client configuration load_kube_config(kube_config, kube_context) @@ -1201,11 +1199,10 @@ def upgrade_agents(cmd, client, resource_group_name, cluster_name, kube_config=N if os.getenv('HELMREPONAME') and os.getenv('HELMREPOURL'): utils.add_helm_repo(kube_config, kube_context, helm_client_location) - # Setting the config dataplane endpoint - config_dp_endpoint = get_config_dp_endpoint(cmd, connected_cluster.location) + config_dp_endpoint, release_train = get_config_dp_endpoint(cmd, connected_cluster.location, values_file) # Retrieving Helm chart OCI Artifact location - registry_path = os.getenv('HELMREGISTRY') if os.getenv('HELMREGISTRY') else utils.get_helm_registry(cmd, config_dp_endpoint, dp_endpoint_dogfood, release_train_dogfood) + registry_path = os.getenv('HELMREGISTRY') if os.getenv('HELMREGISTRY') else utils.get_helm_registry(cmd, config_dp_endpoint, release_train) reg_path_array = registry_path.split(':') agent_version = reg_path_array[1] @@ -1271,7 +1268,7 @@ def upgrade_agents(cmd, client, resource_group_name, cluster_name, kube_config=N if not infra_added: cmd_helm_upgrade.extend(["--set", "global.kubernetesInfra={}".format("generic")]) - if values_file_provided: + if values_file: cmd_helm_upgrade.extend(["-f", values_file]) if kube_config: cmd_helm_upgrade.extend(["--kubeconfig", kube_config]) @@ -1397,13 +1394,7 @@ def 
enable_features(cmd, client, resource_group_name, cluster_name, features, ku kube_config = set_kube_config(kube_config) # Checking whether optional extra values file has been provided. - values_file_provided, values_file = utils.get_values_file() - - # Validate the helm environment file for Dogfood. - dp_endpoint_dogfood = None - release_train_dogfood = None - if cmd.cli_ctx.cloud.endpoints.resource_manager == consts.Dogfood_RMEndpoint: - dp_endpoint_dogfood, release_train_dogfood = validate_env_file_dogfood(values_file, values_file_provided) + values_file = utils.get_values_file() # Loading the kubeconfig file in kubernetes client configuration load_kube_config(kube_config, kube_context) @@ -1439,11 +1430,10 @@ def enable_features(cmd, client, resource_group_name, cluster_name, features, ku if os.getenv('HELMREPONAME') and os.getenv('HELMREPOURL'): utils.add_helm_repo(kube_config, kube_context, helm_client_location) - # Setting the config dataplane endpoint - config_dp_endpoint = get_config_dp_endpoint(cmd, connected_cluster.location) + config_dp_endpoint, release_train = get_config_dp_endpoint(cmd, connected_cluster.location, values_file) # Retrieving Helm chart OCI Artifact location - registry_path = os.getenv('HELMREGISTRY') if os.getenv('HELMREGISTRY') else utils.get_helm_registry(cmd, config_dp_endpoint, dp_endpoint_dogfood, release_train_dogfood) + registry_path = os.getenv('HELMREGISTRY') if os.getenv('HELMREGISTRY') else utils.get_helm_registry(cmd, config_dp_endpoint, release_train) reg_path_array = registry_path.split(':') agent_version = reg_path_array[1] @@ -1461,7 +1451,7 @@ def enable_features(cmd, client, resource_group_name, cluster_name, features, ku cmd_helm_upgrade = [helm_client_location, "upgrade", "azure-arc", chart_path, "--namespace", release_namespace, "--reuse-values", "--wait", "--output", "json"] - if values_file_provided: + if values_file: cmd_helm_upgrade.extend(["-f", values_file]) if kube_config: 
cmd_helm_upgrade.extend(["--kubeconfig", kube_config]) @@ -1508,13 +1498,7 @@ def disable_features(cmd, client, resource_group_name, cluster_name, features, k kube_config = set_kube_config(kube_config) # Checking whether optional extra values file has been provided. - values_file_provided, values_file = utils.get_values_file() - - # Validate the helm environment file for Dogfood. - dp_endpoint_dogfood = None - release_train_dogfood = None - if cmd.cli_ctx.cloud.endpoints.resource_manager == consts.Dogfood_RMEndpoint: - dp_endpoint_dogfood, release_train_dogfood = validate_env_file_dogfood(values_file, values_file_provided) + values_file = utils.get_values_file() # Loading the kubeconfig file in kubernetes client configuration load_kube_config(kube_config, kube_context) @@ -1563,21 +1547,21 @@ def disable_features(cmd, client, resource_group_name, cluster_name, features, k if os.getenv('HELMREPONAME') and os.getenv('HELMREPOURL'): utils.add_helm_repo(kube_config, kube_context, helm_client_location) - get_chart_and_disable_features(cmd, connected_cluster, dp_endpoint_dogfood, release_train_dogfood, kube_config, kube_context, - helm_client_location, release_namespace, values_file_provided, values_file, disable_azure_rbac, + get_chart_and_disable_features(cmd, connected_cluster, kube_config, kube_context, + helm_client_location, release_namespace, values_file, disable_azure_rbac, disable_cluster_connect, disable_cl) return str.format(consts.Successfully_Disabled_Features, features, connected_cluster.name) -def get_chart_and_disable_features(cmd, connected_cluster, dp_endpoint_dogfood, release_train_dogfood, kube_config, kube_context, - helm_client_location, release_namespace, values_file_provided, values_file, disable_azure_rbac=False, +def get_chart_and_disable_features(cmd, connected_cluster, kube_config, kube_context, + helm_client_location, release_namespace, values_file, disable_azure_rbac=False, disable_cluster_connect=False, disable_cl=False): - # Setting the 
config dataplane endpoint - config_dp_endpoint = get_config_dp_endpoint(cmd, connected_cluster.location) + + config_dp_endpoint, release_train = get_config_dp_endpoint(cmd, connected_cluster.location, values_file) # Retrieving Helm chart OCI Artifact location - registry_path = os.getenv('HELMREGISTRY') if os.getenv('HELMREGISTRY') else utils.get_helm_registry(cmd, config_dp_endpoint, dp_endpoint_dogfood, release_train_dogfood) + registry_path = os.getenv('HELMREGISTRY') if os.getenv('HELMREGISTRY') else utils.get_helm_registry(cmd, config_dp_endpoint, release_train) reg_path_array = registry_path.split(':') agent_version = reg_path_array[1] @@ -1595,7 +1579,7 @@ def get_chart_and_disable_features(cmd, connected_cluster, dp_endpoint_dogfood, cmd_helm_upgrade = [helm_client_location, "upgrade", "azure-arc", chart_path, "--namespace", release_namespace, "--reuse-values", "--wait", "--output", "json"] - if values_file_provided: + if values_file: cmd_helm_upgrade.extend(["-f", values_file]) if kube_config: cmd_helm_upgrade.extend(["--kubeconfig", kube_config]) @@ -1619,12 +1603,12 @@ def get_chart_and_disable_features(cmd, connected_cluster, dp_endpoint_dogfood, raise CLIInternalError(str.format(consts.Error_disabling_Features, error_helm_upgrade.decode("ascii"))) -def disable_cluster_connect(cmd, client, resource_group_name, cluster_name, kube_config, kube_context, values_file, values_file_provided, dp_endpoint_dogfood, release_train_dogfood, release_namespace, helm_client_location): +def disable_cluster_connect(cmd, client, resource_group_name, cluster_name, kube_config, kube_context, values_file, release_namespace, helm_client_location): # Fetch Connected Cluster for agent version connected_cluster = get_connectedk8s(cmd, client, resource_group_name, cluster_name) - get_chart_and_disable_features(cmd, connected_cluster, dp_endpoint_dogfood, release_train_dogfood, kube_config, kube_context, - helm_client_location, release_namespace, values_file_provided, values_file, 
False, + get_chart_and_disable_features(cmd, connected_cluster, kube_config, kube_context, + helm_client_location, release_namespace, values_file, False, True, True) @@ -2172,7 +2156,7 @@ def check_cl_registration_and_get_oid(cmd, cl_oid, subscription_id): enable_custom_locations = False except Exception as e: enable_custom_locations = False - logger.warning("Unable to fetch registration state of 'Microsoft.ExtendedLocation'. Failed to enable 'custom-locations' feature...") + logger.warning("Unable to fetch registration state of 'Microsoft.ExtendedLocation'. Failed to enable 'custom-locations' feature. This is fine if not required. Proceeding with helm install.") telemetry.set_exception(exception=e, fault_type=consts.Custom_Locations_Registration_Check_Fault_Type, summary='Unable to fetch status of Custom Locations RP registration.') return enable_custom_locations, custom_locations_oid @@ -2307,24 +2291,16 @@ def troubleshoot(cmd, client, resource_group_name, cluster_name, kube_config=Non diagnostic_checks[consts.KAP_Cert_Check] = troubleshootutils.check_kap_cert(corev1_api_instance) # Checking whether optional extra values file has been provided. - values_file_provided, values_file = utils.get_values_file() - - # Validate the helm environment file for Dogfood. 
- dp_endpoint_dogfood = None - release_train_dogfood = None - if cmd.cli_ctx.cloud.endpoints.resource_manager == consts.Dogfood_RMEndpoint: - consts.Azure_DogfoodCloudName - dp_endpoint_dogfood, release_train_dogfood = validate_env_file_dogfood(values_file, values_file_provided) + values_file = utils.get_values_file() # Adding helm repo if os.getenv('HELMREPONAME') and os.getenv('HELMREPOURL'): utils.add_helm_repo(kube_config, kube_context, helm_client_location) - # Setting the config dataplane endpoint - config_dp_endpoint = get_config_dp_endpoint(cmd, connected_cluster.location) + config_dp_endpoint, release_train = get_config_dp_endpoint(cmd, connected_cluster.location, values_file) # Retrieving Helm chart OCI Artifact location - registry_path = os.getenv('HELMREGISTRY') if os.getenv('HELMREGISTRY') else utils.get_helm_registry(cmd, config_dp_endpoint, dp_endpoint_dogfood, release_train_dogfood) + registry_path = os.getenv('HELMREGISTRY') if os.getenv('HELMREGISTRY') else utils.get_helm_registry(cmd, config_dp_endpoint, release_train) # Get azure-arc agent version for telemetry azure_arc_agent_version = registry_path.split(':')[1] diff --git a/src/connectedk8s/setup.py b/src/connectedk8s/setup.py index ba2ecdb5532..5525b03d0ff 100644 --- a/src/connectedk8s/setup.py +++ b/src/connectedk8s/setup.py @@ -17,7 +17,7 @@ # TODO: Confirm this is the right version number you want and it matches your # HISTORY.rst entry. -VERSION = '1.3.20' +VERSION = '1.4.0' # The full list of classifiers is available at # https://pypi.python.org/pypi?%3Aaction=list_classifiers